# author: <NAME> # contact: <EMAIL> # date: 2021-09-12 # version: 0.0.1 import numpy as np from mollab.atom import Atom, TemplateAtom from mollab.residue import Residue, TemplateResidue import pytest @pytest.fixture def templateCH4(): # H2 # | # H1 - C - H0 # | # H3 CH4 = TemplateResidue('CH4') CH4.create_atoms(names=['C', 'H0', 'H1', 'H2', 'H3'], positions=np.array([[0, 0, 0], [1, 0, 0], [-1, 0, 0], [0, 1, 0], [0, -1, 0]], dtype=float)) CH4.addTopo({0: [1,2,3,4]}) yield CH4 @pytest.fixture def templateCO2(): C = TemplateAtom('C') O = TemplateAtom('O') CO2 = TemplateResidue('CO2') CO2.addAtoms(C, O, O) CO2.addBond(0, 1) CO2.addBond(0, 2) yield CO2 @pytest.fixture def ch4(templateCH4): return templateCH4() class TestResidue: def test_render(self, templateCO2, templateCH4): positions = np.array([[0, 0, 0], [1, 0, 0], [-1,0, 0]], dtype=float) co2 = templateCO2(positions=positions, names=['C', 'O1', 'O2']) ch4 = templateCH4() assert co2[0].name == 'C' assert ch4[0].name == 'C' def test_set(self, ch4): ch4.name = 'CH4' assert ch4.name == 'CH4' ch4.name = 'CH4' def test_get(self, ch4): assert ch4.name == 'CH4' def test_compare(self): pass def test_getitem(self, ch4): assert ch4[0].name == 'C' assert (ch4['H3'].position == np.array([0, -1, 0])).all() def test_json(self, ch4): print(ch4.toDict()) def test_hash(self, ch4): pass def test_unique(self, ch4): pass def test_linkage(self, templateCH4): ch4l = templateCH4(positions=np.array([[0, 0, 0], [1, 0, 0], [-1, 0, 0], [0, 1, 0], [0, -1, 0]], dtype=float)) ch4r = templateCH4(positions=np.array([[1, 0, 0], [2, 0, 0], [0, 0, 0], [1, 1, 0], [1, -1, 0]])) ch4r.move(np.array([1, 0, 0])) ch4l.bondto(ch4r, 'H0', 'H1', method='condensation') assert len(ch4l.atoms) == len(ch4r.atoms) == 4 assert ch4l['C'] in ch4r['C'].bondAtoms def test_move(self, ch4): ch4.move(np.array([0, 1, 0])) assert (ch4['C'].position == np.array([0, 1, 0])).all() def test_addResidues(self, templateCH4): # H2 # | # H1 - C - H0 # | # H3 # depreated: we can not manipulate atom and residue directly, # but templateAtom and templateResidue. # ch3l = templateCH4() # ch2 = templateCH4() # ch3r = templateCH4() # ch3l.bondto(ch2, 0, 1) # ch3r.bondto(ch2, 1, 0) # disambiguate #TODO: add Add multi-level support CH3CH2CH3 = TemplateResidue('CH3CH2CH3') # CH3CH2CH3.addReisudes(templateCH4, templateCH4, templateCH4)
// src/utils/syncUtils/common.tsx
import axios from "axios";
import { config } from "../../constants/driveList";
import OtherUtil from "../otherUtil";
import BookModel from "../../model/Book";
import localforage from "localforage";

export function getParamsFromUrl() {
  var hashParams: any = {};
  var e,
    r = /([^&;=]+)=?([^&;]*)/g,
    q =
      window.location.hash.substring(2) ||
      window.location.search.substring(1).split("#")[0];
  while ((e = r.exec(q))) {
    hashParams[e[1]] = decodeURIComponent(e[2]);
  }
  return hashParams;
}

export const moveData = (blob, driveIndex, books: BookModel[] = []) => {
  let file = new File([blob], "moveData.zip", {
    lastModified: new Date().getTime(),
    type: blob.type,
  });
  let formData = new FormData();
  formData.append("file", file);
  formData.append(
    "dataPath",
    OtherUtil.getReaderConfig("storageLocation")
      ? OtherUtil.getReaderConfig("storageLocation")
      : window
          .require("electron")
          .ipcRenderer.sendSync("storage-location", "ping")
  );
  axios
    .post(`${config.token_url}/move_data`, formData, {
      headers: {
        "Content-Type": "multipart/form-data",
      },
      responseType: "blob",
    })
    .then(async (response: any) => {
      if (driveIndex === 4) {
        let deleteBooks = books.map((item) => {
          return localforage.removeItem(item.key);
        });
        await Promise.all(deleteBooks);
      }
    })
    .catch(function (error: any) {
      console.error(error, "Failed to move data");
    });
};

class SyncUtil {
  static changeLocation(
    oldPath: string,
    newPath: string,
    handleMessage: (message: string) => void,
    handleMessageBox: (isShow: boolean) => void
  ) {
    axios
      .post(`${config.token_url}/change_location`, {
        oldPath,
        newPath,
      })
      .then(function (response: any) {
        console.log(response, "Change succeeded");
        handleMessage("Change Successfully");
        handleMessageBox(true);
      })
      .catch(function (error: any) {
        console.log(error, "Change failed");
        handleMessage("Change Failed");
        handleMessageBox(true);
      });
  }

  static syncData() {}
}

export default SyncUtil;
# Walk through the string and record the indices of each occurrence
# (not just the count), then compare them afterwards.
# Examples: ABBAuuuABjklBA  ABAXXXAB  "AB BA"
import math

s = input()
ls = len(s)
mA = []  # index sums of "AB" occurrences
mB = []  # index sums of "BA" occurrences
for i in range(ls - 1):
    if s[i] == "A" and s[i + 1] == "B":
        # store i + (i + 1); two occurrences overlap iff these sums differ by at most 2
        mA.append(i + i + 1)
    if s[i] == "B" and s[i + 1] == "A":
        mB.append(i + i + 1)

# Answer YES if some "AB" and "BA" occurrences do not overlap.
for j in range(len(mA)):
    for l in range(len(mB)):
        if math.fabs(mA[j] - mB[l]) > 2:
            print("YES")
            exit()
print("NO")
#include <bits/stdc++.h> using namespace std; typedef long long int ll; const ll N=1e5+10,SQ=315; ll m[SQ+10][N],n,q,c,l,r,k,Lst,ld,rd,re,x,y,st,r2,b[N],s[SQ+10],ind[SQ+10]; vector <ll> v[SQ+10]; void upd(ll l,ll r) { ld=l/SQ; rd=r/SQ; l-=s[ld]; r-=s[rd]; if(ld-rd==0) { x=v[ld][(ind[ld]+l)%SQ]; for(int i=l+1; i<=r; i++) swap(x,v[ld][(ind[ld]+i)%SQ]); v[ld][(ind[ld]+l)%SQ]=x; return; } x=0; for(int i=l; i<SQ; i++) swap(x,v[ld][(ind[ld]+i)%SQ]); m[ld][x]--; for(int i=ld+1; i<rd; i++) { ind[i]+=SQ-1,ind[i]%=SQ; m[i][x]++; swap(v[i][ind[i]],x); m[i][x]--; } m[rd][x]++; for(int i=0; i<=r; i++) swap(v[rd][(ind[rd]+i)%SQ],x); m[rd][x]--; v[ld][(ind[ld]+l)%SQ]=x; m[ld][x]++; } void get(ll l,ll r,ll k) { ld=l/SQ; rd=r/SQ; l-=s[ld]; r-=s[rd]; if(ld-rd==0) { for(int i=l; i<=r; i++) if(v[ld][(ind[ld]+i)%SQ]==k) Lst++; return; } for(int i=l; i<SQ; i++) if(v[ld][(ind[ld]+i)%SQ]==k) Lst++; for(int i=0; i<=r; i++) if(v[rd][(ind[rd]+i)%SQ]==k) Lst++; for(int i=ld+1; i<rd; i++) Lst+=m[i][k]; } int main() { ios::sync_with_stdio(false),cin.tie(0),cout.tie(0); cin >> n; for(int i=0; i<n; i++) { cin >> x; v[i/SQ].push_back(x); m[i/SQ][x]++; } for(int i=0; i<=n/SQ; i++) s[i]=i*SQ,ind[i]=0; cin >> q; Lst=0; for(int i=1; i<=q; i++) { cin >> c >> l >> r; l=(l+Lst-1)%n; r=(r+Lst-1)%n; if(l>r) swap(l,r); if(c==1) upd(l,r); else { cin >> k; k=(k+Lst-1)%n+1; Lst=0; get(l,r,k); cout << Lst << '\n'; } } }
export enum ActionTypes {
  CHANGE_PAGE = '[Catalog/items] Load items',
}

// Change page
export type TChangePage = {
  type: ActionTypes.CHANGE_PAGE;
  page: number;
};

export const changePage = (page: number): TChangePage => {
  return {
    type: ActionTypes.CHANGE_PAGE,
    page,
  };
};

export type ActionsAll = TChangePage;
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with this
work for additional information regarding copyright ownership. The ASF
licenses this file to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
"""
import yaml
import argparse

argparser = argparse.ArgumentParser()
argparser.add_argument('--cards_docker_tag',
                       help='Switch to this tagged version of the CARDS Docker image',
                       required=True)
args = argparser.parse_args()

# YAML load
with open('docker-compose.yml', 'r') as f_yaml:
    yaml_obj = yaml.load(f_yaml.read(), Loader=yaml.SafeLoader)

# Do the replacement
new_cards_image = yaml_obj['services']['cardsinitial']['image']
new_cards_image = new_cards_image.split(":")[0] + ":" + args.cards_docker_tag
yaml_obj['services']['cardsinitial']['image'] = new_cards_image

# YAML save
with open('docker-compose.yml', 'w') as f_yaml:
    f_yaml.write(yaml.dump(yaml_obj, default_flow_style=False))

print("Upgraded CARDS Docker image to: {}".format(args.cards_docker_tag))
/**
 * The game's main state. Contains most of the logic.
 */
#ifndef __PLAYSTATE_H__
#define __PLAYSTATE_H__

#include <base/error.h>

/** Free all memory used by the playstate */
void playstate_clean();

/** Initialize and alloc the playstate */
err playstate_init();

/** Reset the playstate so it may start to run again */
err playstate_reset();

/** Update a single frame */
err playstate_update();

/** Draw a single frame */
err playstate_draw();

#endif /* __PLAYSTATE_H__ */
from struct import pack


def _point_m_to_wkb(x, y, m):
    """Pack a PointM coordinate triple as three little-endian doubles (WKB body)."""
    return pack('<3d', x, y, m)
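A minimal usage sketch (not part of the original module; the values are arbitrary) showing that the helper above produces a 24-byte payload that round-trips through struct.unpack:

from struct import unpack

# Hypothetical round-trip check for the helper above.
payload = _point_m_to_wkb(1.0, 2.0, 42.0)
assert len(payload) == 24  # 3 little-endian doubles, 8 bytes each
assert unpack('<3d', payload) == (1.0, 2.0, 42.0)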
SAT Heritage: A Community-Driven Effort for Archiving, Building and Running More Than Thousand SAT Solvers

SAT research has a long history of source code and binary releases, thanks to competitions organized every year. However, since every cycle of competitions has its own set of rules and an ad hoc way of publishing source code and binaries, compiling or even running any solver may be harder than it seems. Moreover, more than a thousand solvers have been published so far, some of them released in the early 90's. If the SAT community wants to archive and keep track of all the solvers that made its history, it urgently needs to invest a significant effort. We propose to initiate a community-driven effort to archive and to allow easy compilation and running of all SAT solvers that have been released so far. We rely on the best tools for archiving and building binaries (thanks to Docker, GitHub and Zenodo) and provide a consistent and easy way to use them. Thanks to our tool, building (or running) a solver from its source (or from its binary) can be done in one line.

Introduction

As Donald Knuth wrote, "The story of satisfiability is the tale of a triumph of software engineering". In this success story of computer science, the availability of SAT solvers' source code has been crucial. Archiving and maintaining this body of knowledge may be as important as archiving the scientific papers that shaped the domain. The release of the source code of MiniSat had, for instance, a dramatic impact on the field. However, nothing has yet been done to ensure that source code and recipes to build SAT solvers will be archived in the best possible way. This is a recent but important concern in the broader field of computer science. The Software Heritage initiative is, for instance, a recent and strong effort in this direction. In the domain of SAT solvers, however, collecting and archiving may not be sufficient: we must also embed the recipe to build the code and to run it in the most efficient way. As the input format for SAT solvers has remained the same for more than 25 years, it is always possible to compare the performance of all existing solvers, given a suitable way of compiling and running them. At that time, some code was built with EGCS, a fork of GCC 2.8 including more features. Facebook and Google didn't exist and Linux machines were running 1.X kernels. Solvers were distributed with source code to be compiled on Intel or SPARC computers. Fortunately enough, binaries for Intel 386 machines distributed at that time are still executable on recent computers, given the availability of compatible libraries. Collecting and distributing SAT solvers' source code is, luckily, not new. SAT competitions, organized since the beginning of the 21st century, have almost always required the publication of the source code of submitted solvers. If source code was not distributed, binaries were often available. However, since the first competitions, the landscape of computer science has changed a lot. New technologies like Docker are now available, changing the way tools are distributed. We propose in this work to structure and bootstrap a collective effort to maintain a comprehensive and user-friendly library of all the solvers that shaped the SAT world. We build our tool, called SAT Heritage, on top of other recent tools typically developed for archiving and distributing source code and applications, like Docker.
The community is invited to contribute by archiving, from now on, all the solvers used in competitions (and papers). We also expect authors of previous solvers to contribute by adding information about their solvers or special command lines not necessarily used during competitive events. Our tool allows, for instance, attaching a DOI (thanks to Zenodo) to the exact version of any solver used in a paper, allowing simple but powerful references. In summary, the goals of our open-source tool are to:
- Collect and archive all SAT solvers, binaries and sources,
- Easily retrieve a Docker image with the binary of any solver, directly from the Docker Hub, or, when source code is available, by locally building the image from the source code of the solver,
- Allow easily running any SAT solver that has ever been available (typically in the last 30 years) with a one-line call (consistent across all solvers),
- Offer a convenient solution for reproducibility (binaries, source code and the recipe to build binaries are archived in a consistent way), thanks to strong connections with tools like Guix and Zenodo.

History of SAT solver releases and publications

The first SAT competitions happened in the 90's. Their goals were multiple: collect and compare SAT solver performances in the fairest possible way, collect and distribute benchmarks, and also take a snapshot of the performances reached so far. Table 1 reports the number of SAT solvers that took part in the different competitions. We counted more than a thousand solvers, but even counting them was not an easy task: one source code can hide a number of subversions (with distinct parameters) and distinct tracks, and some information was only partially available.

[Table 1: Date, #Solvers, Collection Type, Contest]

Following the ideas of these first competitions organized in the 90's, and thanks to the development of the web, the satex website published solvers and benchmarks gathered by the website maintainer. satex was running SAT solvers on only one personal computer. Some solvers were modified to comply with the input/output conventions of the satex framework (like a normalized exit code value). It was a personal initiative, made possible by the relatively few solvers available (all solvers of the initial satex are available in our tool). During the first cycle of competitions (numbered 1 in Table 1), submitters had to compile a static binary of their solver (to prevent library dependencies) via remote access to the same computer. To ensure the deployment of their solver, this computer had the exact same Linux version as the one deployed on the cluster used to run the contest. Some solvers came from industry, which explains why open source code was not mandatory: the priority was to draw the most accurate picture of solver performances. However, it was quickly decided (competitions numbered 2 in the above table) that it was even more important to require submitters to open their code. Binaries were then allowed to enter the competition, but only in the demonstration category (no prizes). More recently, thanks to the starexec environment, the compilation of solvers was somewhat normalized (an image of a virtual Linux machine on which the code would be built and run was distributed). Each cycle of competitions or races came with its own set of rules and an ad hoc way of publishing source code and binaries, with a non-uniform way of providing details on which parameters to use. For example, since 2016, solvers must provide a certificate for unsatisfiable instances.
One thus has to go through all the solvers to find the correct parameters for running them without proof logging. Despite the increasing importance of software archiving, the way SAT solvers are distributed has not really changed in the last 25 years. It is still mainly done via personal websites or SAT competition and race websites, with each cycle of events defining its own rules. As a result, it is often unclear how to recover any SAT solver (same code, same arguments) used in many papers, old or recent. It is even more questionable whether, despite the importance of SAT solvers' source code, we are able to correctly archive and maintain them.

SAT Heritage Docker images

The SAT Heritage project provides a centralized repository of instructions to build and execute the SAT solvers involved in competitions since the early ages of SAT. To that aim, it relies on Docker images, which are self-contained Linux-based environments to execute binaries. Docker allows explicitly listing all the packages needed to compile the source code and building a temporary image (the "builder") for compiling the solver. The compiled solver is then embedded in another, lighter image which contains only the libraries required to execute it. Each version of each collected solver is thus made available in a dedicated Docker image. Thanks to the layer structure of images, all solvers sharing the same environment share the major part of the image content, thus substantially saving disk space. In the end, the Docker image is not much heavier than the binary of the solver. Docker images can be executed on usual operating systems. On Linux, Docker offers the same performance as native binaries: only filesystem and network operations have a slight overhead due to the isolation, which is not of concern for SAT solvers. On other systems, the images are executed within a virtual machine, adding a noticeable performance overhead, although considerably reduced on recent hardware.

Architecture

The instructions to build and run the collected solvers are hosted publicly on GitHub, to which the community is invited to contribute. The solvers are typically grouped by year of competition. Images are then named satex/<solver-name>:<year>. The images are built by compiling solver sources whenever available. The compilation environment matches a Linux distribution from the time of the competition. We selected the Debian GNU/Linux distribution, which provides Docker images for each of its versions since 2000. For instance, the solvers from the 2000 competition are built using Debian "Potato", as it was at that time. In principle, each solver can have its own recipe and environment for building and execution. Nevertheless, we managed to devise Docker recipes compatible with several generations of competitions. The architecture of the repository also allows custom sets of solvers. For example, the SAT Heritage collection includes Knuth's different solvers as well as solvers written in Java or Python. The image-building Docker recipes indicate where to download the sources, or the binaries whenever the former are not available. At the time of writing, most recipes use URLs from the websites of the SAT competitions. In order to provide locations that are as persistent as possible, we are progressively moving resources to Zenodo, which will host sources and binaries in the near future (currently, only the binaries of the original satex and of the 2002 competition are hosted on it). The sketch below illustrates the general shape of such a two-stage recipe.
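As an illustration only (the base-image tag, source URL, build commands and paths below are assumptions, not the exact recipes stored in the repository), a two-stage "builder" recipe typically looks like this:

# Builder stage: an era-appropriate Debian image with the toolchain needed to compile the solver.
FROM debian/eol:potato AS builder
RUN apt-get update && apt-get install -y gcc make
# Hypothetical source location; real recipes point to the competition websites or Zenodo.
ADD http://www.satcompetition.org/2002/some-solver.tar.gz /src/
WORKDIR /src
RUN tar xzf some-solver.tar.gz && make -C some-solver

# Runtime stage: a lighter image carrying only the compiled binary behind the generic wrapper.
FROM debian/eol:potato
COPY --from=builder /src/some-solver/solver /usr/local/bin/solver
ENTRYPOINT ["/usr/local/bin/solver"]

Following the naming scheme above, such an image would be tagged satex/some-solver:2002; the recipes in the sat-heritage/docker-images repository factor these steps so that a single recipe can cover a whole generation of solvers.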
The images can be built locally from the git repository, and are also available for download from the main public Docker repository, which distributes "official" binaries of solvers. This allows directly running any collected (or compiled) solver very quickly.

Running solvers

We provide a Python script, called satex, which eases the execution and management of the available Docker images, although images can also be run directly without it. The script can be installed using the pip utility:

pip3 install -U satex

The list of available solvers can be fetched using the command satex list. We provide a generic wrapper in each image giving a unified means of invoking the solver: a DIMACS file (possibly gzipped) as first argument, and optionally an output file for the proof:

# run a solver on a cnf file
satex run cadical:2019 file.cnf
# run and produce a proof
satex run glucose:2019 file.cnf proof

The satex info command gives, together with general information on the solver and the image environment, the specific options used for the run. Alternatively, custom options can be used with the satex run-raw command. If the image has not been built locally, it will attempt to fetch it from the online Docker repository. See satex -h for other available commands, such as extracting binaries out of Docker images and invoking shells within a given image.

Building and adding new solvers

The building of images, which involves the compilation of the solvers when possible, also relies on Docker images, and thus only requires Docker and Python for the satex command. A single satex command, executed at the root of the sat-heritage/docker-images repository, builds the matching solvers with their adequate recipes. Sets of solvers are added by specifying which Docker recipes to use for building the images and how to invoke the individual solvers. Managing sets of solvers allows sharing common configurations (such as Linux distributions, compilers and so on) across Docker images. Complete and up-to-date documentation can be found in the README file of the repository.

Reproducibility is a cornerstone of science. In computer science, it has recently prompted significant efforts by researchers, institutions and companies to devise good practices and provide adequate infrastructure. Among the numerous initiatives, Software Heritage and Zenodo are probably the most important efforts for archiving source code, repositories, datasets, and binaries, for which they provide persistent storage, URLs, and references (DOI). Another example is the GitHub Archive Program, a repository on storage with a 500-year lifespan preserved in the Arctic World Archive. Created more recently, the Guix initiative aims at keeping the details of any Linux machine configuration, thanks to a declarative system configuration. External URLs used for building any image are also archived. Our tool produces Docker images that can be easily frozen thanks to Guix, by building Guix images from the Dockerfile recipe. It is also worth mentioning that Guix has strong connections with Software Heritage and GitHub. If we look at the reproducibility of SAT solver experiments on a longer time scale, we can expect that, some day, current binaries (for i386) will no longer run natively on computers. We can expect, however, that there will be i386 emulators. Once such an emulator is set up, we can also expect Docker to be available on it, and then all the images we built will be handled natively.
If not, since Docker recipes are plain text, it will be easy to convert them to another framework. Therefore, facilitating the accessibility of software over time now boils down to simple habits, such as using source-versioning platforms, taking advantage of services like Zenodo or Software Heritage to freeze package dependencies, source code, binaries, and benchmarks, and providing Docker images that give both the environment and the recipe to build and run your software.

Conclusion

We presented a tool for easily archiving and running all SAT solvers produced so far. Such a tool is needed because (1) source code and experiments are crucial for the SAT community and (2) a very large number of SAT solvers have already been produced, with many different ways of publishing their sources. To complete our tool, we are considering further improvements, such as Docker images for compiling SAT solvers for architectures other than i386 (ARM for instance), but also initiating another important effort for the community: Docker images for benchmark generation and maintenance. Many benchmarks are combinatorial ones, typically generated by short programs. These generators are generally not distributed by the different competitive events, yet they may contain a lot of information on the structure of the generated problems. We also think that our tool could be very useful for SAT solver configuration and easy cloud deployment in a portfolio setting. Finally, we expect our work to give the community the best possible habits for state-of-the-art archiving and reproducibility practices.
/** * Requests a connection from each registered database, and runs the isValid check. * * @return A Map of registered databases to their status. True indicates the given database connection is valid, false indicates * the application was unable to communicate with the given database. */ public Map<String, Boolean> testConnections() { Map<String, Boolean> lTestResults = new HashMap<>(); for (Map.Entry<String, HikariDataSource> lDatabase : mDatabaseConnectionPoolMapping.entrySet()) { String lDatabaseIdentifier = lDatabase.getKey() + ": " + lDatabase.getValue().getJdbcUrl(); boolean lIsValid; try (Connection lConnection = lDatabase.getValue().getConnection()) { lIsValid = lConnection.isValid(5); } catch (SQLException ex) { Logger.getRootLogger().warn("Exception when testing database connection '" + lDatabaseIdentifier + "'.", ex); lIsValid = false; } lTestResults.put(lDatabaseIdentifier, lIsValid); } return lTestResults; }
package com.adam.food.domain.foodlist; import com.google.gson.annotations.Expose; import com.google.gson.annotations.SerializedName; import java.util.ArrayList; import java.util.List; /** * Created by adamlee on 2016/3/15. */ public class TgFoodListWrapper { @SerializedName("status") @Expose private Boolean status; @SerializedName("total") @Expose private Integer total; @SerializedName("tngou") @Expose private List<TgFoodList> tngou = new ArrayList<TgFoodList>(); /** * * @return * The status */ public Boolean getStatus() { return status; } /** * * @param status * The status */ public void setStatus(Boolean status) { this.status = status; } /** * * @return * The total */ public Integer getTotal() { return total; } /** * * @param total * The total */ public void setTotal(Integer total) { this.total = total; } /** * * @return * The tngou */ public List<TgFoodList> getTngou() { return tngou; } /** * * @param tngou * The tngou */ public void setTngou(List<TgFoodList> tngou) { this.tngou = tngou; } }
// Copyright 2021 The Rode Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package policy import ( "context" "errors" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" immocks "github.com/rode/es-index-manager/mocks" "github.com/rode/grafeas-elasticsearch/go/v1beta1/storage/esutil" "github.com/rode/grafeas-elasticsearch/go/v1beta1/storage/filtering" "github.com/rode/rode/pkg/constants" "google.golang.org/grpc/codes" "google.golang.org/protobuf/encoding/protojson" "google.golang.org/protobuf/types/known/timestamppb" "github.com/rode/grafeas-elasticsearch/go/v1beta1/storage/esutil/esutilfakes" "github.com/rode/grafeas-elasticsearch/go/v1beta1/storage/filtering/filteringfakes" "github.com/rode/rode/config" pb "github.com/rode/rode/proto/v1alpha1" ) var _ = Describe("PolicyGroupManager", func() { var ( manager PolicyGroupManager ctx = context.Background() expectedPolicyGroupsAlias string esClient *esutilfakes.FakeClient esConfig *config.ElasticsearchConfig indexManager *immocks.FakeIndexManager filterer *filteringfakes.FakeFilterer ) BeforeEach(func() { esClient = &esutilfakes.FakeClient{} esConfig = randomEsConfig() indexManager = &immocks.FakeIndexManager{} filterer = &filteringfakes.FakeFilterer{} expectedPolicyGroupsAlias = fake.LetterN(10) indexManager.AliasNameReturns(expectedPolicyGroupsAlias) manager = NewPolicyGroupManager(logger, esClient, esConfig, indexManager, filterer) }) Context("CreatePolicyGroup", func() { var ( policyGroupName string createPolicyRequest *pb.PolicyGroup actualPolicyGroup *pb.PolicyGroup getPolicyGroupResponse *esutil.EsGetResponse getPolicyGroupError error createPolicyGroupError error actualError error ) BeforeEach(func() { policyGroupName = fake.Word() createPolicyRequest = randomPolicyGroup(policyGroupName) getPolicyGroupResponse = &esutil.EsGetResponse{ Id: policyGroupName, Found: false, } getPolicyGroupError = nil createPolicyGroupError = nil }) JustBeforeEach(func() { esClient.GetReturns(getPolicyGroupResponse, getPolicyGroupError) esClient.CreateReturns("", createPolicyGroupError) actualPolicyGroup, actualError = manager.CreatePolicyGroup(ctx, deepCopyPolicyGroup(createPolicyRequest)) }) It("should check to see if the policy name is in use", func() { Expect(esClient.GetCallCount()).To(Equal(1)) _, actualRequest := esClient.GetArgsForCall(0) Expect(actualRequest.Index).To(Equal(expectedPolicyGroupsAlias)) Expect(actualRequest.DocumentId).To(Equal(policyGroupName)) }) It("should create the policy group document", func() { Expect(esClient.CreateCallCount()).To(Equal(1)) _, actualRequest := esClient.CreateArgsForCall(0) Expect(actualRequest.DocumentId).To(Equal(policyGroupName)) Expect(actualRequest.Index).To(Equal(expectedPolicyGroupsAlias)) Expect(actualRequest.Refresh).To(Equal(esConfig.Refresh.String())) actualMessage := actualRequest.Message.(*pb.PolicyGroup) Expect(actualMessage.Name).To(Equal(policyGroupName)) Expect(actualMessage.Description).To(Equal(createPolicyRequest.Description)) }) It("should not return an error", func() { 
Expect(actualError).NotTo(HaveOccurred()) }) It("should return the new policy", func() { Expect(actualPolicyGroup.Name).To(Equal(policyGroupName)) Expect(actualPolicyGroup.Description).To(Equal(createPolicyRequest.Description)) Expect(actualPolicyGroup.Created.IsValid()).To(BeTrue()) Expect(actualPolicyGroup.Updated.IsValid()).To(BeTrue()) }) When("the name is invalid", func() { BeforeEach(func() { createPolicyRequest.Name = fake.URL() }) It("should return an error", func() { Expect(actualPolicyGroup).To(BeNil()) Expect(actualError).To(HaveOccurred()) Expect(getGRPCStatusFromError(actualError).Code()).To(Equal(codes.InvalidArgument)) }) It("should not insert the policy group", func() { Expect(esClient.CreateCallCount()).To(Equal(0)) }) }) When("a policy group with that name already exists", func() { BeforeEach(func() { getPolicyGroupResponse.Found = true policyGroup := randomPolicyGroup(policyGroupName) policyGroupJson, _ := protojson.Marshal(policyGroup) getPolicyGroupResponse.Source = policyGroupJson }) It("should return an error", func() { Expect(actualPolicyGroup).To(BeNil()) Expect(actualError).To(HaveOccurred()) Expect(getGRPCStatusFromError(actualError).Code()).To(Equal(codes.AlreadyExists)) }) It("should not insert the policy group", func() { Expect(esClient.CreateCallCount()).To(Equal(0)) }) }) When("an error occurs while checking if there's an existing policy group by that name", func() { BeforeEach(func() { getPolicyGroupError = errors.New("get policy group error") }) It("should return an error", func() { Expect(actualPolicyGroup).To(BeNil()) Expect(actualError).To(HaveOccurred()) Expect(getGRPCStatusFromError(actualError).Code()).To(Equal(codes.Internal)) }) It("should not insert the policy group", func() { Expect(esClient.CreateCallCount()).To(Equal(0)) }) }) When("an error occurs creating the document in Elasticsearch", func() { BeforeEach(func() { createPolicyGroupError = errors.New("create error") }) It("should return an error", func() { Expect(actualPolicyGroup).To(BeNil()) Expect(actualError).To(HaveOccurred()) Expect(getGRPCStatusFromError(actualError).Code()).To(Equal(codes.Internal)) }) }) }) Context("ListPolicyGroups", func() { var ( request *pb.ListPolicyGroupsRequest actualResponse *pb.ListPolicyGroupsResponse actualError error policyGroupCount int expectedPolicyGroups []*pb.PolicyGroup expectedFilterQuery *filtering.Query expectedFilterError error searchResponse *esutil.SearchResponse searchError error ) BeforeEach(func() { request = &pb.ListPolicyGroupsRequest{} policyGroupCount = fake.Number(2, 5) expectedPolicyGroups = []*pb.PolicyGroup{} searchResponse = &esutil.SearchResponse{ Hits: &esutil.EsSearchResponseHits{}, } searchError = nil for i := 0; i < policyGroupCount; i++ { policyGroupName := fake.Word() policyGroup := randomPolicyGroup(policyGroupName) expectedPolicyGroups = append(expectedPolicyGroups, policyGroup) policyGroupJson, _ := protojson.Marshal(policyGroup) searchResponse.Hits.Hits = append(searchResponse.Hits.Hits, &esutil.EsSearchResponseHit{ ID: policyGroupName, Source: policyGroupJson, }) } expectedFilterQuery = nil expectedFilterError = nil }) JustBeforeEach(func() { filterer.ParseExpressionReturns(expectedFilterQuery, expectedFilterError) esClient.SearchReturns(searchResponse, searchError) actualResponse, actualError = manager.ListPolicyGroups(ctx, request) }) It("should issue a search for all undeleted policy groups", func() { Expect(filterer.ParseExpressionCallCount()).To(Equal(0)) Expect(esClient.SearchCallCount()).To(Equal(1)) _, 
actualRequest := esClient.SearchArgsForCall(0) Expect(actualRequest.Index).To(Equal(expectedPolicyGroupsAlias)) Expect(actualRequest.Pagination).To(BeNil()) Expect(actualRequest.Search.Sort["created"]).To(Equal(esutil.EsSortOrderDescending)) Expect(*actualRequest.Search.Query.Bool.Must).To(HaveLen(1)) actualQuery := (*actualRequest.Search.Query.Bool.Must)[0].(*filtering.Query) Expect((*actualQuery.Term)["deleted"]).To(Equal("false")) }) It("should return the policy groups", func() { Expect(actualResponse).NotTo(BeNil()) Expect(actualResponse.PolicyGroups).To(ConsistOf(expectedPolicyGroups)) }) It("should not return an error", func() { Expect(actualError).NotTo(HaveOccurred()) }) When("a filter is applied", func() { var expectedFilter string BeforeEach(func() { expectedFilter = fake.Word() request.Filter = expectedFilter expectedFilterQuery = &filtering.Query{ Term: &filtering.Term{ fake.Word(): fake.Word(), }, } }) It("should parse the filter expression", func() { Expect(filterer.ParseExpressionCallCount()).To(Equal(1)) actualFilter := filterer.ParseExpressionArgsForCall(0) Expect(actualFilter).To(Equal(expectedFilter)) }) It("should include the query in the search", func() { Expect(esClient.SearchCallCount()).To(Equal(1)) _, actualRequest := esClient.SearchArgsForCall(0) Expect(*actualRequest.Search.Query.Bool.Must).To(HaveLen(2)) Expect((*actualRequest.Search.Query.Bool.Must)[1]).To(Equal(expectedFilterQuery)) }) When("an error occurs parsing the filter expression", func() { BeforeEach(func() { expectedFilterError = errors.New("parse error") }) It("should return an error", func() { Expect(actualResponse).To(BeNil()) Expect(actualError).To(HaveOccurred()) Expect(getGRPCStatusFromError(actualError).Code()).To(Equal(codes.Internal)) }) It("should not query Elasticsearch", func() { Expect(esClient.SearchCallCount()).To(Equal(0)) }) }) }) When("pagination options are specified", func() { var ( pageToken string pageSize int32 nextPageToken string ) BeforeEach(func() { nextPageToken = fake.Word() pageSize = int32(fake.Number(10, 100)) pageToken = fake.Word() request.PageSize = pageSize request.PageToken = pageToken searchResponse.NextPageToken = nextPageToken }) It("should include the page size and token in the search request", func() { Expect(esClient.SearchCallCount()).To(Equal(1)) _, actualRequest := esClient.SearchArgsForCall(0) Expect(actualRequest.Pagination.Token).To(Equal(pageToken)) Expect(actualRequest.Pagination.Size).To(BeEquivalentTo(pageSize)) }) It("should return the next page token", func() { Expect(actualResponse.NextPageToken).To(Equal(nextPageToken)) }) }) When("there are no policy groups", func() { BeforeEach(func() { searchResponse.Hits.Hits = nil }) It("should return an empty list", func() { Expect(actualResponse.PolicyGroups).To(BeEmpty()) }) }) When("an error occurs searching for policy groups", func() { BeforeEach(func() { searchError = errors.New("search error") }) It("should return an error", func() { Expect(actualResponse).To(BeNil()) Expect(actualError).To(HaveOccurred()) Expect(getGRPCStatusFromError(actualError).Code()).To(Equal(codes.Internal)) }) }) When("a policy group document is malformed", func() { BeforeEach(func() { randomIndex := fake.Number(0, policyGroupCount-1) searchResponse.Hits.Hits[randomIndex].Source = invalidJson }) It("should return an error", func() { Expect(actualResponse).To(BeNil()) Expect(actualError).To(HaveOccurred()) Expect(getGRPCStatusFromError(actualError).Code()).To(Equal(codes.Internal)) }) }) }) Context("GetPolicyGroup", func() { 
var ( policyGroupName string expectedPolicyGroup *pb.PolicyGroup getPolicyGroupResponse *esutil.EsGetResponse getPolicyGroupError error actualPolicyGroup *pb.PolicyGroup actualError error ) BeforeEach(func() { policyGroupName = fake.Word() expectedPolicyGroup = randomPolicyGroup(policyGroupName) policyGroupJson, _ := protojson.Marshal(expectedPolicyGroup) getPolicyGroupResponse = &esutil.EsGetResponse{ Id: policyGroupName, Found: true, Source: policyGroupJson, } getPolicyGroupError = nil }) JustBeforeEach(func() { esClient.GetReturns(getPolicyGroupResponse, getPolicyGroupError) actualPolicyGroup, actualError = manager.GetPolicyGroup(ctx, &pb.GetPolicyGroupRequest{Name: policyGroupName}) }) It("should get the alias name from the index manager", func() { Expect(indexManager.AliasNameCallCount()).To(Equal(1)) documentKind, inner := indexManager.AliasNameArgsForCall(0) Expect(documentKind).To(Equal(constants.PolicyGroupsDocumentKind)) Expect(inner).To(BeEmpty()) }) It("should fetch the policy group from Elasticsearch", func() { Expect(esClient.GetCallCount()).To(Equal(1)) _, actualRequest := esClient.GetArgsForCall(0) Expect(actualRequest.Index).To(Equal(expectedPolicyGroupsAlias)) Expect(actualRequest.DocumentId).To(Equal(policyGroupName)) }) It("should return the policy group", func() { Expect(actualPolicyGroup).NotTo(BeNil()) Expect(actualPolicyGroup.Name).To(Equal(policyGroupName)) Expect(actualPolicyGroup.Description).To(Equal(expectedPolicyGroup.Description)) Expect(actualPolicyGroup.Created.IsValid()).To(BeTrue()) Expect(actualPolicyGroup.Updated.IsValid()).To(BeTrue()) }) It("should not return an error", func() { Expect(actualError).NotTo(HaveOccurred()) }) When("the name is empty", func() { BeforeEach(func() { policyGroupName = "" }) It("should return an error", func() { Expect(actualPolicyGroup).To(BeNil()) Expect(actualError).To(HaveOccurred()) Expect(getGRPCStatusFromError(actualError).Code()).To(Equal(codes.InvalidArgument)) }) It("should not try to fetch the policy group document", func() { Expect(esClient.GetCallCount()).To(Equal(0)) }) }) When("an error occurs fetching the policy group document", func() { BeforeEach(func() { getPolicyGroupError = errors.New("get policy group error") }) It("should return an error", func() { Expect(actualPolicyGroup).To(BeNil()) Expect(actualError).To(HaveOccurred()) Expect(getGRPCStatusFromError(actualError).Code()).To(Equal(codes.Internal)) }) }) When("the policy group document cannot be deserialized", func() { BeforeEach(func() { getPolicyGroupResponse.Source = invalidJson }) It("should return an error", func() { Expect(actualPolicyGroup).To(BeNil()) Expect(actualError).To(HaveOccurred()) Expect(getGRPCStatusFromError(actualError).Code()).To(Equal(codes.Internal)) }) }) When("the policy group document is not found", func() { BeforeEach(func() { getPolicyGroupResponse.Found = false }) It("should return an error", func() { Expect(actualPolicyGroup).To(BeNil()) Expect(actualError).To(HaveOccurred()) Expect(getGRPCStatusFromError(actualError).Code()).To(Equal(codes.NotFound)) }) }) }) Context("UpdatePolicyGroup", func() { var ( policyGroupName string existingPolicyGroup *pb.PolicyGroup updatedPolicyGroup *pb.PolicyGroup actualPolicyGroup *pb.PolicyGroup actualError error getPolicyGroupResponse *esutil.EsGetResponse getPolicyGroupError error updatePolicyGroupError error ) BeforeEach(func() { policyGroupName = fake.Word() existingPolicyGroup = randomPolicyGroup(policyGroupName) updatedPolicyGroup = deepCopyPolicyGroup(existingPolicyGroup) 
updatedPolicyGroup.Description = fake.Sentence(5) policyGroupJson, _ := protojson.Marshal(existingPolicyGroup) getPolicyGroupResponse = &esutil.EsGetResponse{ Id: policyGroupName, Found: true, Source: policyGroupJson, } getPolicyGroupError = nil updatePolicyGroupError = nil }) JustBeforeEach(func() { esClient.GetReturns(getPolicyGroupResponse, getPolicyGroupError) esClient.UpdateReturns(nil, updatePolicyGroupError) actualPolicyGroup, actualError = manager.UpdatePolicyGroup(ctx, deepCopyPolicyGroup(updatedPolicyGroup)) }) It("should fetch the current policy group", func() { Expect(esClient.GetCallCount()).To(Equal(1)) _, actualRequest := esClient.GetArgsForCall(0) Expect(actualRequest.Index).To(Equal(expectedPolicyGroupsAlias)) Expect(actualRequest.DocumentId).To(Equal(policyGroupName)) }) It("should update Elasticsearch with the new description", func() { Expect(esClient.UpdateCallCount()).To(Equal(1)) _, actualRequest := esClient.UpdateArgsForCall(0) Expect(actualRequest.DocumentId).To(Equal(policyGroupName)) Expect(actualRequest.Index).To(Equal(expectedPolicyGroupsAlias)) Expect(actualRequest.Refresh).To(Equal(esConfig.Refresh.String())) actualMessage := actualRequest.Message.(*pb.PolicyGroup) Expect(actualMessage.Name).To(Equal(policyGroupName)) Expect(actualMessage.Description).To(Equal(updatedPolicyGroup.Description)) Expect(actualMessage.Updated.IsValid()).To(BeTrue()) }) It("should return the updated policy group", func() { Expect(actualPolicyGroup).NotTo(BeNil()) Expect(actualPolicyGroup.Name).To(Equal(policyGroupName)) Expect(actualPolicyGroup.Description).To(Equal(updatedPolicyGroup.Description)) Expect(actualPolicyGroup.Updated.IsValid()).To(BeTrue()) }) It("should not return an error", func() { Expect(actualError).NotTo(HaveOccurred()) }) When("an error occurs fetching the policy group", func() { BeforeEach(func() { getPolicyGroupError = errors.New("get error") }) It("should return an error", func() { Expect(actualPolicyGroup).To(BeNil()) Expect(actualError).To(HaveOccurred()) Expect(getGRPCStatusFromError(actualError).Code()).To(Equal(codes.Internal)) }) It("should not try to update the policy group", func() { Expect(esClient.UpdateCallCount()).To(Equal(0)) }) }) When("an error occurs updating the policy group", func() { BeforeEach(func() { updatePolicyGroupError = errors.New("update error") }) It("should return an error", func() { Expect(actualPolicyGroup).To(BeNil()) Expect(actualError).To(HaveOccurred()) Expect(getGRPCStatusFromError(actualError).Code()).To(Equal(codes.Internal)) }) }) When("the policy group has been deleted", func() { BeforeEach(func() { existingPolicyGroup.Deleted = true policyGroupJson, _ := protojson.Marshal(existingPolicyGroup) getPolicyGroupResponse.Source = policyGroupJson }) It("should return an error", func() { Expect(actualError).To(HaveOccurred()) Expect(getGRPCStatusFromError(actualError).Code()).To(Equal(codes.FailedPrecondition)) }) It("should not allow the update", func() { Expect(esClient.UpdateCallCount()).To(Equal(0)) }) }) }) Context("DeletePolicyGroup", func() { var ( policyGroupName string existingPolicyGroup *pb.PolicyGroup actualError error getPolicyGroupResponse *esutil.EsGetResponse getPolicyGroupError error updatePolicyGroupError error ) BeforeEach(func() { policyGroupName = fake.Word() existingPolicyGroup = randomPolicyGroup(policyGroupName) policyGroupJson, _ := protojson.Marshal(existingPolicyGroup) getPolicyGroupResponse = &esutil.EsGetResponse{ Id: policyGroupName, Found: true, Source: policyGroupJson, } getPolicyGroupError = 
nil updatePolicyGroupError = nil }) JustBeforeEach(func() { esClient.GetReturns(getPolicyGroupResponse, getPolicyGroupError) esClient.UpdateReturns(nil, updatePolicyGroupError) _, actualError = manager.DeletePolicyGroup(ctx, &pb.DeletePolicyGroupRequest{Name: policyGroupName}) }) It("should find the current policy group", func() { Expect(esClient.GetCallCount()).To(Equal(1)) _, actualRequest := esClient.GetArgsForCall(0) Expect(actualRequest.DocumentId).To(Equal(policyGroupName)) Expect(actualRequest.Index).To(Equal(expectedPolicyGroupsAlias)) }) It("should set the deleted flag to true", func() { existingPolicyGroup.Deleted = true Expect(esClient.UpdateCallCount()).To(Equal(1)) _, actualRequest := esClient.UpdateArgsForCall(0) Expect(actualRequest.Index).To(Equal(expectedPolicyGroupsAlias)) Expect(actualRequest.DocumentId).To(Equal(policyGroupName)) Expect(actualRequest.Refresh).To(Equal(esConfig.Refresh.String())) Expect(actualRequest.Message).To(Equal(existingPolicyGroup)) }) It("should not return an error", func() { Expect(actualError).NotTo(HaveOccurred()) }) When("an error occurs fetching the existing policy group", func() { BeforeEach(func() { getPolicyGroupError = errors.New("get error") }) It("should return an error", func() { Expect(actualError).To(HaveOccurred()) Expect(getGRPCStatusFromError(actualError).Code()).To(Equal(codes.Internal)) }) It("should not delete the policy group", func() { Expect(esClient.UpdateCallCount()).To(Equal(0)) }) }) When("an error occurs deleting the policy group", func() { BeforeEach(func() { updatePolicyGroupError = errors.New("update error") }) It("should return an error", func() { Expect(actualError).To(HaveOccurred()) Expect(getGRPCStatusFromError(actualError).Code()).To(Equal(codes.Internal)) }) }) }) }) func randomPolicyGroup(name string) *pb.PolicyGroup { return &pb.PolicyGroup{ Name: name, Description: fake.Sentence(5), Deleted: false, Created: timestamppb.New(fake.Date()), Updated: timestamppb.New(fake.Date()), } } func deepCopyPolicyGroup(group *pb.PolicyGroup) *pb.PolicyGroup { return &pb.PolicyGroup{ Name: group.Name, Description: group.Description, } }
package task;

import org.processmining.plugins.declareanalyzer.AnalysisResult;
import org.processmining.plugins.declareanalyzer.Tester;

import javafx.concurrent.Task;

public class DeclareAnalyzerTask extends Task<AnalysisResult> {

    private String logFile;
    private String modelFile;

    public DeclareAnalyzerTask(String log, String model) {
        this.logFile = log;
        this.modelFile = model;
    }

    @Override
    protected AnalysisResult call() throws Exception {
        System.out.println("Conformance checking starts millis: " + System.currentTimeMillis());
        return Tester.run(logFile, modelFile);
    }
}
/** @file @author <EMAIL> / project daisy bell @details license: MIT */ #include "src/OpenType.h" GlyphOutline GlyphOutline_A() { // ** //! @todo flags repeat, Coodinates SHORT_VECTOR GlyphOutline outline = {0}; GlyphClosePath cpath0 = {0}; GlyphAnchorPoint apoints0[] = { {{ 50, 100},}, {{ 250, 600},}, {{ 450, 100},}, {{ 250, 180},}, }; GlyphClosePath_addAnchorPoints(&cpath0, apoints0, sizeof(apoints0) / sizeof(apoints0[0])); GlyphOutline_addClosePath(&outline, &cpath0); return outline; } int main(int argc, char **argv) { /** 第1引数でフォントファイル名を指定する */ if(argc < 2){ return 1; } const char *fontname = argv[1]; int baseline = 300; /** CFF(OpenType)(MSSPEC)の要求する以下の必須テーブルを作成していく。 cmap, head, hhea, hmtx, maxp, name, OS/2, post TrueType・CFF共通の必須テーブル TrueType(AppleSPEC) cmap, glyph, head, hhea, hmtx, maxp, loca, maxp, name, post を作成していく。 */ /** 'head' Table */ BBox bBox = BBox_generate(50, 450, - baseline, 1000 - baseline); // 値は一応のデザインルールから仮の値 HeadTable headTable; HeadTableFlagsElement flags = (HeadTableFlagsElement)(0x0 //| HeadTableFlagsElement_Bit0_isBaselineAtYIsZero | HeadTableFlagsElement_Bit1_isLeftSidebearingPointAtXIsZero //| HeadTableFlagsElement_Bit3_isPpemScalerMath //| HeadTableFlagsElement_Bit13_isClearType ); ASSERT(HeadTable_init( &headTable, 0x00010000, flags, LONGDATETIMEType_generate(timeFromStr("2019-01-01T00:00:00+00:00")), LONGDATETIMEType_generate(timeFromStr("2019-01-01T00:00:00+00:00")), (MacStyle)MacStyle_Bit6_Regular, bBox, 8 )); /** 'name' Table */ NameTableBuf nameTableBuf = NameTableBuf_init( "(c)Copyright the project daisy bell 2019", //"©Copyright the project daisy bell 2019", fontname, (MacStyle)MacStyle_Bit6_Regular, "Version 1.0", "project daisy bell", "MichinariNukazawa", "https://daisy-bell.booth.pm/", "https://twitter.com/MNukazawa" ); /** 'glyf' Table and 'loca' Table (glyph descriptions offset) */ GlyphTablesBuf glyphTablesBuf; GlyphTablesBuf_init(&glyphTablesBuf); HheaTable hheaTable = {0}; HmtxTableBuf hmtxTableBuf = {0}; size_t advanceWidth = 500; size_t lsb = 50; { // ** .notdefなどデフォルトの文字を追加 // & CmapTableテーブルにGlyphIdの初期値をセット //! 
@note Format0のBackspaceなどへのGlyphIdの割り当てはFontForgeの出力ファイルに倣った // *** .notdef GlyphDescriptionBuf glyphDescriptionBuf_notdef = {0}; GlyphOutline outline_notdef = GlyphOutline_Notdef(); GlyphDescriptionBuf_setOutline(&glyphDescriptionBuf_notdef, &outline_notdef); GlyphTablesBuf_appendSimpleGlyph(&glyphTablesBuf, 0x0, &glyphDescriptionBuf_notdef); HmtxTableBuf_appendLongHorMetric(&hmtxTableBuf, advanceWidth, lsb); // 下の2つのGlyphで使用する空の字形 GlyphDescriptionBuf glyphDescriptionBuf_empty = {0}; GlyphOutline outline_empty = {0}; GlyphDescriptionBuf_setOutline(&glyphDescriptionBuf_empty, &outline_empty); // *** NUL and other GlyphTablesBuf_appendSimpleGlyph(&glyphTablesBuf, 0, &glyphDescriptionBuf_empty); glyphTablesBuf.cmapSubtableBuf_GlyphIdArray8[ 8] = 1; // BackSpace = index 1 glyphTablesBuf.cmapSubtableBuf_GlyphIdArray8[29] = 1; // GroupSeparator = index 1 HmtxTableBuf_appendLongHorMetric(&hmtxTableBuf, 0, 0); // *** TAB(HT) and other GlyphTablesBuf_appendSimpleGlyph(&glyphTablesBuf, '\t', &glyphDescriptionBuf_empty); glyphTablesBuf.cmapSubtableBuf_GlyphIdArray8[13] = 1; // CR = index 2 HmtxTableBuf_appendLongHorMetric(&hmtxTableBuf, 1000, 0); // ** 目的の字形・文字を追加していく GlyphDescriptionBuf glyphDescriptionBuf_A = {0}; GlyphOutline outline_A = GlyphOutline_A(); GlyphDescriptionBuf_setOutline(&glyphDescriptionBuf_A, &outline_A); GlyphTablesBuf_appendSimpleGlyph(&glyphTablesBuf, 'A', &glyphDescriptionBuf_A); HmtxTableBuf_appendLongHorMetric(&hmtxTableBuf, advanceWidth, lsb); // ** 追加終了して集計・ByteArray化する。 GlyphTablesBuf_finally(&glyphTablesBuf); } { size_t ascender = 1000 - baseline; size_t descender = baseline; size_t lineGap = 24; size_t minLeftSideBearing = lsb; size_t minRightSideBearing = lsb; size_t xMaxExtent = lsb + (bBox.xMax - bBox.xMin); HheaTable_init( &hheaTable, ascender, descender, lineGap, hmtxTableBuf.advanceWidthMax, minLeftSideBearing, minRightSideBearing, xMaxExtent, hmtxTableBuf.numberOfHMetrics); } HmtxTableBuf_finally(&hmtxTableBuf); /** 'maxp' Table: 使用グリフ数。 TrueType必須Table。 */ #if 0 MaxpTable_Version05 maxpTable_Version05; ASSERT(MaxpTable_Version05_init(&maxpTable_Version05, glyphTablesBuf.numGlyphs)); #endif #if 1 MaxpTable_Version10 maxpTable_Version10 = { .version = (FixedType)htonl(0x00010000), .numGlyphs = htons(glyphTablesBuf.numGlyphs), .maxPoints = htons(8), // @todo 以下はFF由来の仮の固定値 .maxContours = htons(2), .maxCompositePoints = htons(0), .maxCompositeContours = htons(0), .maxZones = htons(2), .maxTwilightPoints = htons(0), .maxStorage = htons(1), .maxFunctionDefs = htons(1), .maxInstructionDefs = htons(0), .maxStackElements = htons(64), .maxSizeOfInstructions = htons(0), .maxComponentElements = htons(0), .maxComponentDepth = htons(0), }; #endif /** 'post' Table: PostScriptエンジン(プリンタ等)が使用する参考情報 */ PostTable_Header postTable = { .version = htonl(0x00030000), .italicAngle = htonl(0x00000000), .underlinePosition = htons(-125), .underlineThickness = htons(50), .isFixedPitch = htonl(0x00000001), }; /** TableDiectoryを生成しつつ、Tableをバイト配列に変換して繋げていく。 Tableにはパディングを入れる。 TableDirectoryを作っていく(Table情報の配列)。 OffsetTable生成時に必要なテーブル数を数えておく。 */ Tablebuf tableBuf; Tablebuf_init(&tableBuf); //Tablebuf_appendTable(&tableBuf, (void *)(&offsetTable), sizeof(OffsetTable)); Tablebuf_appendTable(&tableBuf, "head", (void *)(&headTable), sizeof(HeadTable)); Tablebuf_appendTable(&tableBuf, "name", (void *)(nameTableBuf.data), nameTableBuf.dataSize); Tablebuf_appendTable(&tableBuf, "maxp", (void *)(&maxpTable_Version10), sizeof(MaxpTable_Version10)); Tablebuf_appendTable(&tableBuf, "cmap", (void 
*)(glyphTablesBuf.cmapByteArray.data), glyphTablesBuf.cmapByteArray.length); Tablebuf_appendTable(&tableBuf, "loca", (void *)(glyphTablesBuf.locaByteArray.data), glyphTablesBuf.locaByteArray.length); Tablebuf_appendTable(&tableBuf, "glyf", (void *)(glyphTablesBuf.glyfData), glyphTablesBuf.glyfDataSize); Tablebuf_appendTable(&tableBuf, "hhea", (void *)(&hheaTable), sizeof(HheaTable)); Tablebuf_appendTable(&tableBuf, "hmtx", (void *)(hmtxTableBuf.byteArray.data), hmtxTableBuf.byteArray.length); Tablebuf_appendTable(&tableBuf, "post", (void *)(&postTable), sizeof(PostTable_Header)); // offsetは、Tableのフォントファイル先頭からのオフセット。先に計算しておく。 const size_t offsetHeadSize = sizeof(OffsetTable) + (sizeof(TableDirectory_Member) * tableBuf.appendTableNum); Tablebuf_finallyTableDirectoryOffset(&tableBuf, offsetHeadSize); /** OffsetTable: (Offset Subtable, sfnt) */ Uint32Type sfntVersion; //memcpy((uint8_t *)&sfntVersion, "OTTO", 4); sfntVersion = 0x00010000; OffsetTable offsetTable; ASSERT(OffsetTable_init(&offsetTable, sfntVersion, tableBuf.appendTableNum)); size_t fontDataSize = sizeof(OffsetTable) + (sizeof(TableDirectory_Member) * tableBuf.appendTableNum) + tableBuf.dataSize ; // 開放はアプリ終了時に任せる uint8_t *fontData = (uint8_t *)ffmalloc(sizeof(uint8_t) * fontDataSize); /** フォントデータを連結(HeadTable.checkSumAdjustmentの計算に用いる) */ DEBUG_LOG("OffsetTable :%zu", sizeof(OffsetTable)); DEBUG_LOG("TableDirectory :%zu", sizeof(TableDirectory_Member) * tableBuf.appendTableNum); DEBUG_LOG("Table :%zu", tableBuf.dataSize); size_t offset = 0; memcpy(&fontData[offset], (uint8_t *)&offsetTable, sizeof(OffsetTable)); offset += sizeof(OffsetTable); memcpy(&fontData[offset], (uint8_t *)tableBuf.tableDirectory, (sizeof(TableDirectory_Member) * tableBuf.appendTableNum)); offset += (sizeof(TableDirectory_Member) * tableBuf.appendTableNum); memcpy(&fontData[offset], (uint8_t *)tableBuf.data, tableBuf.dataSize); /** 'head'TableにcheckSumAdjustment要素を計算して書き込む。 */ Uint32Type checkSumAdjustment = 0xB1B0AFBA - CalcTableChecksum((uint32_t *)fontData, fontDataSize); size_t checkSumAdjustmentOffset = sizeof(OffsetTable) + (sizeof(TableDirectory_Member) * tableBuf.appendTableNum) + 0 // 'head' Tableは先頭に置くこととする。 + offsetof(HeadTable, checkSumAdjustment) ; DEBUG_LOG("checkSumAdjustment:%zu(0x%04lx) 0x%08x", checkSumAdjustmentOffset, checkSumAdjustmentOffset, checkSumAdjustment); uint32_t *checkSumAdjustmentPointer = (uint32_t *)&(fontData[checkSumAdjustmentOffset]); *checkSumAdjustmentPointer = htonl(checkSumAdjustment); /** ファイル書き出し */ char *fontfilename = (char *)ffmalloc(strlen(fontname) + 5); sprintf(fontfilename, "%s.otf", fontname); int fd = open(fontfilename, O_CREAT|O_TRUNC|O_RDWR, 0777); if(-1 == fd){ fprintf(stderr, "open: %d %s\n", errno, strerror(errno)); return 1; } ssize_t s; s = write(fd, fontData, fontDataSize); if(0 == s){ fprintf(stderr, "write: %d %s\n", errno, strerror(errno)); return 1; } close(fd); return 0; }
Martin Joseph Fettman (B.S., D.V.M., M.S., Ph.D., Diplomate, ACVP) is an American pathologist and researcher who flew on NASA Space Shuttle mission STS-58 aboard the Space Shuttle Columbia as a Payload Specialist.

Personal data

Born December 31, 1956, Brooklyn, New York. Married to Heather Connally DVM MS DACVECC. Recreational interests include scuba diving, amateur radio, flying, bicycling, pistol marksmanship, camping and mountain hiking, photography, travel, reading (mysteries), and music (jazz and classical). His mother, Mrs. Elaine Fettman Peck, resides in Brooklyn, New York, with his stepfather, Mr. Harold Peck. His father, Mr. Bernard P. Fettman, is deceased.

Education

1973: Graduated from Midwood High School, Brooklyn, New York
1976: Received bachelor of science degree in animal nutrition from Cornell University
1980: Received a doctor of veterinary medicine degree and master of science degree in nutrition from Cornell University
1982: Received a doctor of philosophy degree in physiology from Colorado State University
1984: Received board certification in veterinary clinical pathology; Diplomate of the American College of Veterinary Pathologists

Organizations

American Academy of Veterinary Nutrition
American Association for Clinical Chemistry
American Association for the Advancement of Science
American College of Veterinary Pathologists
American Dairy Science Association
American Society of Animal Science
American Society of Gravitational and Space Biology
American Veterinary Medical Association
Association of Veterinarians for Animal Rights
New York Academy of Sciences
Shock Society
National Audubon Society (life)
National Wildlife Federation (life)
Nature Conservancy
Sierra Club

Publications

He has published over 100 research articles in refereed scientific journals.

Experience

Fettman's first faculty appointment was 1982-1986 in the Department of Pathology of the College of Veterinary Medicine and Biomedical Sciences at Colorado State University, as an Assistant Professor of Clinical Pathology whose duties included teaching, research, and clinical service. From 1983 to the present, he has held a joint appointment in the Department of Physiology at Colorado State University and his research and teaching interests have focused on selected aspects of the pathophysiology of nutritional and metabolic diseases, with emphasis on the physiological biochemistry of energy, electrolyte, and fluid metabolism. In 1986 he was promoted to Associate Professor, and in 1988 assumed the duties of section chief of Clinical Pathology in the Veterinary Teaching Hospital at Colorado State University. Fettman spent one year (1989–1990) on sabbatical leave as a Visiting Professor of Medicine at The Queen Elizabeth Hospital and the University of Adelaide, South Australia, where he worked with the Gastroenterology Unit studying the biochemical epidemiology of human colorectal cancer. He was appointed to the Mark L. Morris Chair in Clinical Nutrition at Colorado State University and received a joint appointment in the Department of Clinical Sciences in 1991, and was promoted to Full Professor of Pathology in 1992. Fettman is a George H. Glover distinguished faculty member of the College of Veterinary Medicine and Biomedical Sciences and was named the 1994 Sigma Xi honored scientist at Colorado State University, the 1994 Spencer T. and Ann W. Olin Lecturer at Cornell University, and a Bard College Distinguished Scientist for 1995.
Spaceflight

Fettman was selected as a NASA payload specialist candidate in December 1991, and as the prime payload specialist for Spacelab Life Sciences-2 in October 1992. He then flew on STS-58 in October 1993. Since the flight, he has made over seventy public appearances representing space life sciences research before higher education, medical, veterinary, and lay organizations, and has visited over twenty K-12 schools around the United States and Canada. He is a member of the NASA Advisory Council Life and Biomedical Sciences and Applications Advisory Subcommittee.[1]
interface AppDefaultComponentProps {
}

interface AppComponentState {
  isOAuthReady: boolean,
  OSMOAuth: osmAuthInstance,
  isAuthenticated: boolean,
  isQuarifiedBrowser: boolean,
}

interface OSMLoggedInComponentProps {
  oauth: osmAuthInstance
}

interface OSMLoggedInComponentState {
  notes: any[],
  noteComments: any[],
  coordinate: {
    lat: string,
    lon: string,
  },
  userName: string,
}

interface MapComponentProps {
  centerCoordinate?: {
    lat: string,
    lon: string,
  }
  notes?: any[]
}

interface NoteListComponentProps {
  notes: any[],
  noteComments: any[],
  osmServer: string,
  userName: string,
}

interface NoteComponentProps {
  note: any,
  noteComments: any[],
  userName: string,
  osmServer: string,
}

interface NoteComponentState {
  currentNote: any,
  currentNoteComments: any[]
}

interface CommentComponentProps {
  noteComment: any,
}
def _SortInstancesForDisplay(instances):
    """Sort instances in place for display: ADB-connected instances first, newest first within each group."""
    instances.sort(key=lambda ins: ins.createtime, reverse=True)
    instances.sort(key=lambda ins: ins.AdbConnected(), reverse=True)
    return instances
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import make_pipeline
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score

# create a pipeline object
pipe = make_pipeline(
    StandardScaler(),
    LogisticRegression()
)

# load the iris dataset and split it into train and test sets
X, y = load_iris(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

# fit the whole pipeline
pipe.fit(X_train, y_train)

# we can now use it like any other estimator
print(accuracy_score(pipe.predict(X_test), y_test))
/// Default destructor
/// Memory taken is released by the parent class destructor
AliQnCorrectionsDetectorConfigurationTracks::~AliQnCorrectionsDetectorConfigurationTracks() {
  if (fQAQnAverageHistogram != NULL)
    delete fQAQnAverageHistogram;
}
/** * Controller for the REST api exposing data types of domain model. * Controller returns response objects containing list of options * possible for each enumerable data type in application. * Returns objects wrapped in a structure for JTable. * @author Michal Jendrzejek */ @RestController @RequestMapping("api/options/") public class JTableOptionsRestController { private final JTableResponseBuilder responseBuilder; @Autowired public JTableOptionsRestController( JTableResponseBuilder responseBuilder ) { this.responseBuilder = responseBuilder; } @RequestMapping( value = "amountPeriod", produces = MediaType.APPLICATION_JSON_VALUE ) public JTableResponse amountPeriodOptions() { List<Option> optionList = new ArrayList<>(); optionList.add(new Option( AmountPeriod.DAY.toString(), AmountPeriod.DAY.toString() )); optionList.add(new Option( AmountPeriod.WEEK.toString(), AmountPeriod.WEEK.toString() )); optionList.add(new Option( AmountPeriod.MONTH.toString(), AmountPeriod.MONTH.toString() )); optionList.add(new Option( AmountPeriod.YEAR.toString(), AmountPeriod.YEAR.toString() )); return responseBuilder.prepareOptionsResponse(optionList); } @RequestMapping( value = "amountType", produces = MediaType.APPLICATION_JSON_VALUE ) public JTableResponse amountTypeOptions() { List<Option> optionList = new ArrayList<>(); optionList.add(new Option( AmountType.NET.toString(), AmountType.NET.toString() )); optionList.add(new Option( AmountType.BRU.toString(), AmountType.BRU.toString() )); return responseBuilder.prepareOptionsResponse(optionList); } }
<filename>InputSurface/GCControlSurface.h #ifndef _GCCONTROLSURFACE_H_ #define _GCCONTROLSURFACE_H_ #ifndef __COCOS2D_H__ #include "cocos2d.h" #endif #ifndef B2_MATH_H #include "Box2D.h" #endif #ifndef _GCTYPES_H_ #include "GamerCamp/Core/GCTypes.h" #endif #include "GamerCamp/EventSystem/IEventSource.h" #include "GamerCamp/EventSystem/CEventManager.h" #include "GamerCamp/Core/ServiceLocator.h" #include "GamerCamp/Input/GCTouchData.h" #include "GamerCamp/Input/GCTouchEventData.h" #include <functional> ////////////////////////////////////////////////////////////////////////// // forward declare class IGCGameLayer; ////////////////////////////////////////////////////////////////////////// // A Control surface management layer that sit in between a menu or game // layer and cocos2d layer to simplify the creation of touchable control // zones // // ////////////////////////////////////////////////////////////////////////// class CGCControlSurface : public IEventSource { private: CGCTouchEventData m_touchBeginData, m_touchMoveData , m_touchEndData ; std::vector<SGCTouchData> m_stdvTouches; std::vector<CGCControlArea> m_stdvControlAreas; // revoke copy CGCControlSurface( const CGCControlSurface& ); CGCControlSurface& operator=( const CGCControlSurface& ); public: CGCControlSurface( void ); virtual ~CGCControlSurface( void ); void ccTouchesBegan( cocos2d::CCSet* pTouches, cocos2d::CCEvent* pEvent ); void ccTouchesMoved( cocos2d::CCSet* pTouches, cocos2d::CCEvent* pEvent ); void ccTouchesEnded( cocos2d::CCSet* pTouches, cocos2d::CCEvent* pEvent ); CGCControlArea* AddControlArea ( const cocos2d::CCRect& controlArea, u32 uControlIndentifier ); CGCControlArea* DetectControlTouch( const cocos2d::CCPoint& touchPosition ); void CreateControlEvent( CEventData* pData ); CGCControlArea* GetControlAreaByID( u32 uId ); }; #endif
Query based event extraction along a timeline

In this paper, we present a framework and a system that extracts events relevant to a query from a collection C of documents, and places such events along a timeline. Each event is represented by a sentence extracted from C, based on the assumption that "important" events are widely cited in many documents for a period of time within which these events are of interest. In our experiments, we used queries that are event types (e.g. "earthquake") and person names (e.g. "George Bush"). Evaluation was performed using G8 leader names as queries: a comparison made by human evaluators between manually and system-generated timelines showed that although manually generated timelines are on average preferred, system-generated timelines are sometimes judged to be better than manually constructed ones.
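The core premise — an "important" event is one mentioned across many documents during a bounded period, and a single extracted sentence can stand in for it — can be illustrated with a small sketch. The Python toy below is not the authors' system: build_timeline, its input format, and the support-count scoring are hypothetical stand-ins for the general idea of ranking dates by how many documents mention the query and then keeping one representative sentence per date.

from collections import defaultdict

def build_timeline(docs, query, top_k=10):
    """Toy illustration: rank dates by how many documents mention the
    query on that date, keep one representative sentence per date, and
    return (date, sentence) pairs in chronological order."""
    # How many documents dated `date` mention the query at all
    # (a crude stand-in for "widely cited for a period of time").
    support = defaultdict(int)
    # One representative sentence per date: the first matching sentence seen.
    representative = {}
    for date, sentences in docs:
        matching = [s for s in sentences if query.lower() in s.lower()]
        if matching:
            support[date] += 1
            representative.setdefault(date, matching[0])
    # Keep the best-supported dates, then lay them out along a timeline.
    ranked = sorted(support, key=support.get, reverse=True)[:top_k]
    return sorted((d, representative[d]) for d in ranked)

if __name__ == "__main__":
    docs = [
        ("2004-12-26", ["A massive earthquake struck off Sumatra today."]),
        ("2004-12-26", ["The earthquake triggered a devastating tsunami."]),
        ("2005-01-02", ["Markets reopened after the holidays."]),
    ]
    for date, sentence in build_timeline(docs, "earthquake"):
        print(date, "-", sentence)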
/** * @author hakonhall * @author bjorncs */ public class HostSuspensionRequestHandler extends RestApiRequestHandler<HostSuspensionRequestHandler> { private static final Logger log = Logger.getLogger(HostSuspensionRequestHandler.class.getName()); private final Orchestrator orchestrator; @Inject public HostSuspensionRequestHandler(LoggingRequestHandler.Context context, Orchestrator orchestrator) { super(context, HostSuspensionRequestHandler::createRestApiDefinition); this.orchestrator = orchestrator; } private static RestApi createRestApiDefinition(HostSuspensionRequestHandler self) { return RestApi.builder() .addRoute(RestApi.route("/orchestrator/v1/suspensions/hosts/{hostname}") .put(self::suspendAll)) .registerJacksonResponseEntity(BatchOperationResult.class) .build(); } private BatchOperationResult suspendAll(RestApi.RequestContext context) { String parentHostnameString = context.pathParameters().getStringOrThrow("hostname"); List<String> hostnamesAsStrings = context.queryParameters().getStringList("hostname"); HostName parentHostname = new HostName(parentHostnameString); List<HostName> hostnames = hostnamesAsStrings.stream().map(HostName::new).collect(Collectors.toList()); try { orchestrator.suspendAll(parentHostname, hostnames); } catch (BatchHostStateChangeDeniedException e) { log.log(Level.FINE, e, () -> "Failed to suspend nodes " + hostnames + " with parent host " + parentHostname); throw createRestApiException(e.getMessage(), Response.Status.CONFLICT, e); } catch (UncheckedTimeoutException e) { log.log(Level.FINE, e, () -> "Failed to suspend nodes " + hostnames + " with parent host " + parentHostname); throw createRestApiException(e.getMessage(), Response.Status.CONFLICT, e); } catch (BatchHostNameNotFoundException e) { log.log(Level.FINE, e, () -> "Failed to suspend nodes " + hostnames + " with parent host " + parentHostname); // Note that we're returning BAD_REQUEST instead of NOT_FOUND because the resource identified // by the URL path was found. It's one of the hostnames in the request it failed to find. throw createRestApiException(e.getMessage(), Response.Status.BAD_REQUEST, e); } catch (BatchInternalErrorException e) { log.log(Level.FINE, e, () -> "Failed to suspend nodes " + hostnames + " with parent host " + parentHostname); throw createRestApiException(e.getMessage(), Response.Status.INTERNAL_SERVER_ERROR, e); } log.log(Level.FINE, () -> "Suspended " + hostnames + " with parent " + parentHostname); return BatchOperationResult.successResult(); } private RestApiException createRestApiException(String errorMessage, int statusCode, Throwable cause) { return new RestApiException( new JacksonJsonResponse<>(statusCode, new BatchOperationResult(errorMessage), true), errorMessage, cause); } }
<reponame>tech-niche-biz/bacula-9.4.4<filename>src/dird/ua_prune.c<gh_stars>0 /* Bacula(R) - The Network Backup Solution Copyright (C) 2000-2017 <NAME> The original author of Bacula is <NAME>, with contributions from many others, a complete list can be found in the file AUTHORS. You may use this file and others of this release according to the license defined in the LICENSE file, which includes the Affero General Public License, v3.0 ("AGPLv3") and some additional permissions and terms pursuant to its AGPLv3 Section 7. This notice must be preserved when any source code is conveyed and/or propagated. Bacula(R) is a registered trademark of Kern Sibbald. */ /* * Bacula Director -- User Agent Database prune Command * Applies retention periods * * <NAME>, February MMII */ #include "bacula.h" #include "dird.h" /* Imported functions */ /* Forward referenced functions */ static bool grow_del_list(struct del_ctx *del); static bool prune_expired_volumes(UAContext*); static bool prune_selected_volumes(UAContext *ua); /* * Called here to count entries to be deleted */ int del_count_handler(void *ctx, int num_fields, char **row) { struct s_count_ctx *cnt = (struct s_count_ctx *)ctx; if (row[0]) { cnt->count = str_to_int64(row[0]); } else { cnt->count = 0; } return 0; } /* * Called here to make in memory list of JobIds to be * deleted and the associated PurgedFiles flag. * The in memory list will then be transversed * to issue the SQL DELETE commands. Note, the list * is allowed to get to MAX_DEL_LIST_LEN to limit the * maximum malloc'ed memory. */ int job_delete_handler(void *ctx, int num_fields, char **row) { struct del_ctx *del = (struct del_ctx *)ctx; if (!grow_del_list(del)) { return 1; } del->JobId[del->num_ids] = (JobId_t)str_to_int64(row[0]); Dmsg2(60, "job_delete_handler row=%d val=%d\n", del->num_ids, del->JobId[del->num_ids]); del->PurgedFiles[del->num_ids++] = (char)str_to_int64(row[1]); return 0; } int file_delete_handler(void *ctx, int num_fields, char **row) { struct del_ctx *del = (struct del_ctx *)ctx; if (!grow_del_list(del)) { return 1; } del->JobId[del->num_ids++] = (JobId_t)str_to_int64(row[0]); // Dmsg2(150, "row=%d val=%d\n", del->num_ids-1, del->JobId[del->num_ids-1]); return 0; } /* Prune jobs or files for all combinations of Client/Pool that we * can find in the Job table. Doing so, the pruning will not prune a * job that is needed to restore the client. As the command will detect * all parameters automatically, it is very convenient to schedule it a * couple of times per day. */ static int prune_all_clients_and_pools(UAContext *ua, int kw) { alist results(owned_by_alist, 100); POOL_MEM label; CLIENT *client; POOL *pool; /* Get the combination of all Client/Pool in the Job table (respecting the ACLs) */ if (!db_get_client_pool(ua->jcr, ua->db, &results)) { ua->error_msg(_("Unable to list Client/Pool. 
ERR=%s\n"), ua->db->errmsg); return false; } while (!results.empty()) { /* Each "record" is made of two values in results */ char *pool_s = (char *)results.pop(); char *client_s = (char *)results.pop(); Dmsg2(100, "Trying to prune %s/%s\n", client_s, pool_s); if (!pool_s || !client_s) { /* Just in case */ ua->error_msg(_("Unable to list Client/Pool %s/%s\n"), NPRTB(client_s), NPRTB(pool_s)); bfree_and_null(pool_s); bfree_and_null(client_s); return false; } /* Make sure the client and the pool are still defined */ client = (CLIENT *)GetResWithName(R_CLIENT, client_s); pool = (POOL *)GetResWithName(R_POOL, pool_s); if (!client || !pool) { Dmsg2(10, "Skip pruning of %s/%s, one resource is missing\n", client_s, pool_s); } free(client_s); free(pool_s); if (!client || !pool) { continue; } /* Display correct messages and do the actual pruning */ if (kw == 0) { ua->info_msg(_("Pruning Files for Client %s with Pool %s...\n"), client->name(), pool->name()); if (pool->FileRetention > 0) { Mmsg(label, "Pool %s File", pool->name()); if (!confirm_retention(ua, &pool->FileRetention, label.c_str())) { return false; } } else { Mmsg(label, "Client %s File", client->name()); if (!confirm_retention(ua, &client->FileRetention, label.c_str())) { return false; } } prune_files(ua, client, pool); } if (kw == 1) { ua->info_msg(_("Pruning Jobs for Client %s with Pool %s...\n"), client->name(), pool->name()); if (pool->JobRetention > 0) { Mmsg(label, "Pool %s Job", pool->name()); if (!confirm_retention(ua, &pool->JobRetention, label.c_str())) { return false; } } else { Mmsg(label, "Client %s Job", client->name()); if (!confirm_retention(ua, &client->JobRetention, label.c_str())) { return false; } } prune_jobs(ua, client, pool, JT_BACKUP); } } return true; } /* * Prune records from database * * prune files (from) client=xxx [pool=yyy] * prune jobs (from) client=xxx [pool=yyy] * prune volume=xxx * prune stats */ int prunecmd(UAContext *ua, const char *cmd) { DIRRES *dir; CLIENT *client; POOL *pool; MEDIA_DBR mr; utime_t retention; int kw; static const char *keywords[] = { NT_("Files"), NT_("Jobs"), NT_("Volume"), NT_("Stats"), NT_("Snapshots"), NULL}; if (!open_new_client_db(ua)) { return false; } /* First search args */ kw = find_arg_keyword(ua, keywords); if (kw < 0 || kw > 4) { /* no args, so ask user */ kw = do_keyword_prompt(ua, _("Choose item to prune"), keywords); } /* prune files/jobs all (prune all Client/Pool automatically) */ if ((kw == 0 || kw == 1) && find_arg(ua, _("all")) > 0) { return prune_all_clients_and_pools(ua, kw); } switch (kw) { case 0: /* prune files */ /* We restrict the client list to ClientAcl, maybe something to change later */ if (!(client = get_client_resource(ua, JT_SYSTEM))) { return false; } if (find_arg_with_value(ua, "pool") >= 0) { pool = get_pool_resource(ua); } else { pool = NULL; } /* Pool File Retention takes precedence over client File Retention */ if (pool && pool->FileRetention > 0) { if (!confirm_retention(ua, &pool->FileRetention, "File")) { return false; } } else if (!confirm_retention(ua, &client->FileRetention, "File")) { return false; } prune_files(ua, client, pool); return true; case 1: /* prune jobs */ /* We restrict the client list to ClientAcl, maybe something to change later */ if (!(client = get_client_resource(ua, JT_SYSTEM))) { return false; } if (find_arg_with_value(ua, "pool") >= 0) { pool = get_pool_resource(ua); } else { pool = NULL; } /* Pool Job Retention takes precedence over client Job Retention */ if (pool && pool->JobRetention > 0) { if 
(!confirm_retention(ua, &pool->JobRetention, "Job")) { return false; } } else if (!confirm_retention(ua, &client->JobRetention, "Job")) { return false; } /* ****FIXME**** allow user to select JobType */ prune_jobs(ua, client, pool, JT_BACKUP); return 1; case 2: /* prune volume */ /* Look for All expired volumes, mostly designed for runscript */ if (find_arg(ua, "expired") >= 0) { return prune_expired_volumes(ua); } prune_selected_volumes(ua); return true; case 3: /* prune stats */ dir = (DIRRES *)GetNextRes(R_DIRECTOR, NULL); if (!dir->stats_retention) { return false; } retention = dir->stats_retention; if (!confirm_retention(ua, &retention, "Statistics")) { return false; } prune_stats(ua, retention); return true; case 4: /* prune snapshots */ prune_snapshot(ua); return true; default: break; } return true; } /* Prune Job stat records from the database. * */ int prune_stats(UAContext *ua, utime_t retention) { char ed1[50]; POOL_MEM query(PM_MESSAGE); utime_t now = (utime_t)time(NULL); db_lock(ua->db); Mmsg(query, "DELETE FROM JobHisto WHERE JobTDate < %s", edit_int64(now - retention, ed1)); db_sql_query(ua->db, query.c_str(), NULL, NULL); db_unlock(ua->db); ua->info_msg(_("Pruned Jobs from JobHisto catalog.\n")); return true; } /* * Use pool and client specified by user to select jobs to prune * returns add_from string to add in FROM clause * add_where string to add in WHERE clause */ bool prune_set_filter(UAContext *ua, CLIENT *client, POOL *pool, utime_t period, POOL_MEM *add_from, POOL_MEM *add_where) { utime_t now; char ed1[50], ed2[MAX_ESCAPE_NAME_LENGTH]; POOL_MEM tmp(PM_MESSAGE); now = (utime_t)time(NULL); edit_int64(now - period, ed1); Dmsg3(150, "now=%lld period=%lld JobTDate=%s\n", now, period, ed1); Mmsg(tmp, " AND JobTDate < %s ", ed1); pm_strcat(*add_where, tmp.c_str()); db_lock(ua->db); if (client) { db_escape_string(ua->jcr, ua->db, ed2, client->name(), strlen(client->name())); Mmsg(tmp, " AND Client.Name = '%s' ", ed2); pm_strcat(*add_where, tmp.c_str()); pm_strcat(*add_from, " JOIN Client USING (ClientId) "); } if (pool) { db_escape_string(ua->jcr, ua->db, ed2, pool->name(), strlen(pool->name())); Mmsg(tmp, " AND Pool.Name = '%s' ", ed2); pm_strcat(*add_where, tmp.c_str()); /* Use ON() instead of USING for some old SQLite */ pm_strcat(*add_from, " JOIN Pool ON (Job.PoolId = Pool.PoolId) "); } Dmsg2(150, "f=%s w=%s\n", add_from->c_str(), add_where->c_str()); db_unlock(ua->db); return true; } /* * Prune File records from the database. For any Job which * is older than the retention period, we unconditionally delete * all File records for that Job. This is simple enough that no * temporary tables are needed. We simply make an in memory list of * the JobIds meeting the prune conditions, then delete all File records * pointing to each of those JobIds. * * This routine assumes you want the pruning to be done. All checking * must be done before calling this routine. * * Note: client or pool can possibly be NULL (not both). 
*/ int prune_files(UAContext *ua, CLIENT *client, POOL *pool) { struct del_ctx del; struct s_count_ctx cnt; POOL_MEM query(PM_MESSAGE); POOL_MEM sql_where(PM_MESSAGE); POOL_MEM sql_from(PM_MESSAGE); utime_t period; char ed1[50]; memset(&del, 0, sizeof(del)); if (pool && pool->FileRetention > 0) { period = pool->FileRetention; } else if (client) { period = client->FileRetention; } else { /* should specify at least pool or client */ return false; } db_lock(ua->db); /* Specify JobTDate and Pool.Name= and/or Client.Name= in the query */ if (!prune_set_filter(ua, client, pool, period, &sql_from, &sql_where)) { goto bail_out; } // edit_utime(now-period, ed1, sizeof(ed1)); // Jmsg(ua->jcr, M_INFO, 0, _("Begin pruning Jobs older than %s secs.\n"), ed1) if (ua->jcr->getJobType() != JT_CONSOLE) { Jmsg(ua->jcr, M_INFO, 0, _("Begin pruning Files.\n")); } /* Select Jobs -- for counting */ Mmsg(query, "SELECT COUNT(1) FROM Job %s WHERE PurgedFiles=0 %s", sql_from.c_str(), sql_where.c_str()); Dmsg1(100, "select sql=%s\n", query.c_str()); cnt.count = 0; if (!db_sql_query(ua->db, query.c_str(), del_count_handler, (void *)&cnt)) { ua->error_msg("%s", db_strerror(ua->db)); Dmsg0(100, "Count failed\n"); goto bail_out; } if (cnt.count == 0) { if (ua->verbose) { ua->warning_msg(_("No Files found to prune.\n")); } goto bail_out; } if (cnt.count < MAX_DEL_LIST_LEN) { del.max_ids = cnt.count + 1; } else { del.max_ids = MAX_DEL_LIST_LEN; } del.tot_ids = 0; del.JobId = (JobId_t *)malloc(sizeof(JobId_t) * del.max_ids); /* Now process same set but making a delete list */ Mmsg(query, "SELECT JobId FROM Job %s WHERE PurgedFiles=0 %s", sql_from.c_str(), sql_where.c_str()); Dmsg1(100, "select sql=%s\n", query.c_str()); db_sql_query(ua->db, query.c_str(), file_delete_handler, (void *)&del); purge_files_from_job_list(ua, del); edit_uint64_with_commas(del.num_del, ed1); ua->info_msg(_("Pruned Files from %s Jobs for client %s from catalog.\n"), ed1, client->name()); bail_out: db_unlock(ua->db); if (del.JobId) { free(del.JobId); } return 1; } static void drop_temp_tables(UAContext *ua) { int i; for (i=0; drop_deltabs[i]; i++) { db_sql_query(ua->db, drop_deltabs[i], NULL, (void *)NULL); } } static bool create_temp_tables(UAContext *ua) { /* Create temp tables and indicies */ if (!db_sql_query(ua->db, create_deltabs[ua->db->bdb_get_type_index()], NULL, (void *)NULL)) { ua->error_msg("%s", db_strerror(ua->db)); Dmsg0(100, "create DelTables table failed\n"); return false; } if (!db_sql_query(ua->db, create_delindex, NULL, (void *)NULL)) { ua->error_msg("%s", db_strerror(ua->db)); Dmsg0(100, "create DelInx1 index failed\n"); return false; } return true; } static bool grow_del_list(struct del_ctx *del) { if (del->num_ids == MAX_DEL_LIST_LEN) { return false; } if (del->num_ids == del->max_ids) { del->max_ids = (del->max_ids * 3) / 2; del->JobId = (JobId_t *)brealloc(del->JobId, sizeof(JobId_t) * del->max_ids); del->PurgedFiles = (char *)brealloc(del->PurgedFiles, del->max_ids); } return true; } struct accurate_check_ctx { DBId_t ClientId; /* Id of client */ DBId_t FileSetId; /* Id of FileSet */ }; /* row: Job.Name, FileSet, Client.Name, FileSetId, ClientId, Type */ static int job_select_handler(void *ctx, int num_fields, char **row) { alist *lst = (alist *)ctx; struct accurate_check_ctx *res; ASSERT(num_fields == 6); /* Quick fix for #5507, avoid locking res_head after db_lock() */ #ifdef bug5507 /* If this job doesn't exist anymore in the configuration, delete it */ if (GetResWithName(R_JOB, row[0]) == NULL) { return 0; } /* If this 
fileset doesn't exist anymore in the configuration, delete it */ if (GetResWithName(R_FILESET, row[1]) == NULL) { return 0; } /* If this client doesn't exist anymore in the configuration, delete it */ if (GetResWithName(R_CLIENT, row[2]) == NULL) { return 0; } #endif /* Don't compute accurate things for Verify jobs */ if (*row[5] == 'V') { return 0; } res = (struct accurate_check_ctx*) malloc(sizeof(struct accurate_check_ctx)); res->FileSetId = str_to_int64(row[3]); res->ClientId = str_to_int64(row[4]); lst->append(res); // Dmsg2(150, "row=%d val=%d\n", del->num_ids-1, del->JobId[del->num_ids-1]); return 0; } /* * Pruning Jobs is a bit more complicated than purging Files * because we delete Job records only if there is a more current * backup of the FileSet. Otherwise, we keep the Job record. * In other words, we never delete the only Job record that * contains a current backup of a FileSet. This prevents the * Volume from being recycled and destroying a current backup. * * For Verify Jobs, we do not delete the last InitCatalog. * * For Restore Jobs there are no restrictions. */ int prune_jobs(UAContext *ua, CLIENT *client, POOL *pool, int JobType) { POOL_MEM query(PM_MESSAGE); POOL_MEM sql_where(PM_MESSAGE); POOL_MEM sql_from(PM_MESSAGE); utime_t period; char ed1[50]; alist *jobids_check=NULL; struct accurate_check_ctx *elt; db_list_ctx jobids, tempids; JOB_DBR jr; struct del_ctx del; memset(&del, 0, sizeof(del)); if (pool && pool->JobRetention > 0) { period = pool->JobRetention; } else if (client) { period = client->JobRetention; } else { /* should specify at least pool or client */ return false; } db_lock(ua->db); if (!prune_set_filter(ua, client, pool, period, &sql_from, &sql_where)) { goto bail_out; } /* Drop any previous temporary tables still there */ drop_temp_tables(ua); /* Create temp tables and indicies */ if (!create_temp_tables(ua)) { goto bail_out; } if (ua->jcr->getJobType() != JT_CONSOLE) { edit_utime(period, ed1, sizeof(ed1)); Jmsg(ua->jcr, M_INFO, 0, _("Begin pruning Jobs older than %s.\n"), ed1); } del.max_ids = 100; del.JobId = (JobId_t *)malloc(sizeof(JobId_t) * del.max_ids); del.PurgedFiles = (char *)malloc(del.max_ids); /* * Select all files that are older than the JobRetention period * and add them into the "DeletionCandidates" table. */ Mmsg(query, "INSERT INTO DelCandidates " "SELECT JobId,PurgedFiles,FileSetId,JobFiles,JobStatus " "FROM Job %s " /* JOIN Pool/Client */ "WHERE Type IN ('B', 'C', 'M', 'V', 'D', 'R', 'c', 'm', 'g') " " %s ", /* Pool/Client + JobTDate */ sql_from.c_str(), sql_where.c_str()); Dmsg1(100, "select sql=%s\n", query.c_str()); if (!db_sql_query(ua->db, query.c_str(), NULL, (void *)NULL)) { if (ua->verbose) { ua->error_msg("%s", db_strerror(ua->db)); } goto bail_out; } /* Now, for the selection, we discard some of them in order to be always * able to restore files. (ie, last full, last diff, last incrs) * Note: The DISTINCT could be more useful if we don't get FileSetId */ jobids_check = New(alist(10, owned_by_alist)); Mmsg(query, "SELECT DISTINCT Job.Name, FileSet, Client.Name, Job.FileSetId, " "Job.ClientId, Job.Type " "FROM DelCandidates " "JOIN Job USING (JobId) " "JOIN Client USING (ClientId) " "JOIN FileSet ON (Job.FileSetId = FileSet.FileSetId) " "WHERE Job.Type IN ('B') " /* Look only Backup jobs */ "AND Job.JobStatus IN ('T', 'W') " /* Look only useful jobs */ ); /* The job_select_handler will skip jobs or filesets that are no longer * in the configuration file. 
Interesting ClientId/FileSetId will be * added to jobids_check (currently disabled in 6.0.7b) */ if (!db_sql_query(ua->db, query.c_str(), job_select_handler, jobids_check)) { ua->error_msg("%s", db_strerror(ua->db)); } /* For this selection, we exclude current jobs used for restore or * accurate. This will prevent to prune the last full backup used for * current backup & restore */ memset(&jr, 0, sizeof(jr)); /* To find useful jobs, we do like an incremental */ jr.JobLevel = L_INCREMENTAL; foreach_alist(elt, jobids_check) { jr.ClientId = elt->ClientId; /* should be always the same */ jr.FileSetId = elt->FileSetId; db_get_accurate_jobids(ua->jcr, ua->db, &jr, &tempids); jobids.add(tempids); } /* Discard latest Verify level=InitCatalog job * TODO: can have multiple fileset */ Mmsg(query, "SELECT JobId, JobTDate " "FROM Job %s " /* JOIN Client/Pool */ "WHERE Type='V' AND Level='V' " " %s " /* Pool, JobTDate, Client */ "ORDER BY JobTDate DESC LIMIT 1", sql_from.c_str(), sql_where.c_str()); if (!db_sql_query(ua->db, query.c_str(), db_list_handler, &jobids)) { ua->error_msg("%s", db_strerror(ua->db)); } /* If we found jobs to exclude from the DelCandidates list, we should * also remove BaseJobs that can be linked with them */ if (jobids.count > 0) { Dmsg1(60, "jobids to exclude before basejobs = %s\n", jobids.list); /* We also need to exclude all basejobs used */ db_get_used_base_jobids(ua->jcr, ua->db, jobids.list, &jobids); /* Removing useful jobs from the DelCandidates list */ Mmsg(query, "DELETE FROM DelCandidates " "WHERE JobId IN (%s) " /* JobId used in accurate */ "AND JobFiles!=0", /* Discard when JobFiles=0 */ jobids.list); if (!db_sql_query(ua->db, query.c_str(), NULL, NULL)) { ua->error_msg("%s", db_strerror(ua->db)); goto bail_out; /* Don't continue if the list isn't clean */ } Dmsg1(60, "jobids to exclude = %s\n", jobids.list); } /* We use DISTINCT because we can have two times the same job */ Mmsg(query, "SELECT DISTINCT DelCandidates.JobId,DelCandidates.PurgedFiles " "FROM DelCandidates"); if (!db_sql_query(ua->db, query.c_str(), job_delete_handler, (void *)&del)) { ua->error_msg("%s", db_strerror(ua->db)); } purge_job_list_from_catalog(ua, del); if (del.num_del > 0) { ua->info_msg(_("Pruned %d %s for client %s from catalog.\n"), del.num_del, del.num_del==1?_("Job"):_("Jobs"), client->name()); } else if (ua->verbose) { ua->info_msg(_("No Jobs found to prune.\n")); } bail_out: drop_temp_tables(ua); db_unlock(ua->db); if (del.JobId) { free(del.JobId); } if (del.PurgedFiles) { free(del.PurgedFiles); } if (jobids_check) { delete jobids_check; } return 1; } static bool prune_selected_volumes(UAContext *ua) { int nb=0; uint32_t *results=NULL; MEDIA_DBR mr; POOL_DBR pr; JCR *jcr = ua->jcr; POOL_MEM tmp; mr.Recycle=1; /* Look for volumes to prune and recycle */ if (!scan_storage_cmd(ua, ua->cmd, false, /* fromallpool*/ NULL /* drive */, &mr, &pr, NULL /* action */, NULL /* storage */, &nb, &results)) { goto bail_out; } for (int i = 0; i < nb; i++) { mr.clear(); mr.MediaId = results[i]; if (!db_get_media_record(jcr, jcr->db, &mr)) { ua->error_msg(_("Unable to get Media record for MediaId %d.\n"), mr.MediaId); continue; } if (mr.Enabled == 2 || strcmp(mr.VolStatus, "Archive") == 0) { ua->error_msg(_("Cannot prune Volume \"%s\" because it is archived.\n"), mr.VolumeName); continue; } if (strcmp(mr.VolStatus, "Full") != 0 && strcmp(mr.VolStatus, "Used") != 0 ) { ua->error_msg(_("Cannot prune Volume \"%s\" because the volume status is \"%s\" and should be Full or Used.\n"), mr.VolumeName, 
mr.VolStatus); continue; } Mmsg(tmp, "Volume \"%s\"", mr.VolumeName); if (!confirm_retention(ua, &mr.VolRetention, tmp.c_str())) { goto bail_out; } prune_volume(ua, &mr); } bail_out: if (results) { free(results); } return true; } /* * Prune a expired Volumes */ static bool prune_expired_volumes(UAContext *ua) { bool ok=false; POOL_MEM query(PM_MESSAGE); POOL_MEM filter(PM_MESSAGE); alist *lst=NULL; int nb=0, i=0; char *val; MEDIA_DBR mr; db_lock(ua->db); /* We can restrict to a specific pool */ if ((i = find_arg_with_value(ua, "pool")) >= 0) { POOL_DBR pdbr; memset(&pdbr, 0, sizeof(pdbr)); bstrncpy(pdbr.Name, ua->argv[i], sizeof(pdbr.Name)); if (!db_get_pool_record(ua->jcr, ua->db, &pdbr)) { ua->error_msg("%s", db_strerror(ua->db)); goto bail_out; } Mmsg(query, " AND PoolId = %lld ", (int64_t) pdbr.PoolId); pm_strcat(filter, query.c_str()); } /* We can restrict by MediaType */ if (((i = find_arg_with_value(ua, "mediatype")) >= 0) && (strlen(ua->argv[i]) <= MAX_NAME_LENGTH)) { char ed1[MAX_ESCAPE_NAME_LENGTH]; db_escape_string(ua->jcr, ua->db, ed1, ua->argv[i], strlen(ua->argv[i])); Mmsg(query, " AND MediaType = '%s' ", ed1); pm_strcat(filter, query.c_str()); } /* Use a limit */ if ((i = find_arg_with_value(ua, "limit")) >= 0) { if (is_an_integer(ua->argv[i])) { Mmsg(query, " LIMIT %s ", ua->argv[i]); pm_strcat(filter, query.c_str()); } else { ua->error_msg(_("Expecting limit argument as integer\n")); goto bail_out; } } lst = New(alist(5, owned_by_alist)); Mmsg(query, expired_volumes[db_get_type_index(ua->db)], filter.c_str()); db_sql_query(ua->db, query.c_str(), db_string_list_handler, &lst); foreach_alist(val, lst) { nb++; memset(&mr, 0, sizeof(mr)); bstrncpy(mr.VolumeName, val, sizeof(mr.VolumeName)); db_get_media_record(ua->jcr, ua->db, &mr); Mmsg(query, _("Volume \"%s\""), val); if (confirm_retention(ua, &mr.VolRetention, query.c_str())) { prune_volume(ua, &mr); } } ua->send_msg(_("%d expired volume%s found\n"), nb, nb>1?"s":""); ok = true; bail_out: db_unlock(ua->db); if (lst) { delete lst; } return ok; } /* * Prune a given Volume */ bool prune_volume(UAContext *ua, MEDIA_DBR *mr) { POOL_MEM query(PM_MESSAGE); struct del_ctx del; bool ok = false; int count; if (mr->Enabled == 2) { return false; /* Cannot prune archived volumes */ } memset(&del, 0, sizeof(del)); del.max_ids = 10000; del.JobId = (JobId_t *)malloc(sizeof(JobId_t) * del.max_ids); db_lock(ua->db); /* Prune only Volumes with status "Full", or "Used" */ if (strcmp(mr->VolStatus, "Full") == 0 || strcmp(mr->VolStatus, "Used") == 0) { Dmsg2(100, "get prune list MediaId=%lu Volume %s\n", mr->MediaId, mr->VolumeName); count = get_prune_list_for_volume(ua, mr, &del); Dmsg1(100, "Num pruned = %d\n", count); if (count != 0) { ua->info_msg(_("Found %d Job(s) associated with the Volume \"%s\" that will be pruned\n"), count, mr->VolumeName); purge_job_list_from_catalog(ua, del); } else { ua->info_msg(_("Found no Job associated with the Volume \"%s\" to prune\n"), mr->VolumeName); } ok = is_volume_purged(ua, mr); } db_unlock(ua->db); if (del.JobId) { free(del.JobId); } return ok; } /* * Get prune list for a volume */ int get_prune_list_for_volume(UAContext *ua, MEDIA_DBR *mr, del_ctx *del) { POOL_MEM query(PM_MESSAGE); int count = 0; utime_t now, period; char ed1[50], ed2[50]; if (mr->Enabled == 2) { return 0; /* cannot prune Archived volumes */ } /* * Now add to the list of JobIds for Jobs written to this Volume */ edit_int64(mr->MediaId, ed1); period = mr->VolRetention; now = (utime_t)time(NULL); edit_int64(now-period, ed2); 
Mmsg(query, sel_JobMedia, ed1, ed2); Dmsg3(250, "Now=%d period=%d now-period=%s\n", (int)now, (int)period, ed2); Dmsg1(100, "Query=%s\n", query.c_str()); if (!db_sql_query(ua->db, query.c_str(), file_delete_handler, (void *)del)) { if (ua->verbose) { ua->error_msg("%s", db_strerror(ua->db)); } Dmsg0(100, "Count failed\n"); goto bail_out; } count = exclude_running_jobs_from_list(del); bail_out: return count; } /* * We have a list of jobs to prune or purge. If any of them is * currently running, we set its JobId to zero which effectively * excludes it. * * Returns the number of jobs that can be prunned or purged. * */ int exclude_running_jobs_from_list(del_ctx *prune_list) { int count = 0; JCR *jcr; bool skip; int i; /* Do not prune any job currently running */ for (i=0; i < prune_list->num_ids; i++) { skip = false; foreach_jcr(jcr) { if (jcr->JobId == prune_list->JobId[i]) { Dmsg2(100, "skip running job JobId[%d]=%d\n", i, (int)prune_list->JobId[i]); prune_list->JobId[i] = 0; skip = true; break; } } endeach_jcr(jcr); if (skip) { continue; /* don't increment count */ } Dmsg2(100, "accept JobId[%d]=%d\n", i, (int)prune_list->JobId[i]); count++; } return count; }
/** * An abstract panel for editing a transport item. */ @SuppressWarnings("serial") public abstract class TransportItemEditingPanel extends JPanel { protected static Simulation sim = Simulation.instance(); protected static UnitManager unitManager = sim.getUnitManager(); protected static MarsClock marsClock = sim.getMasterClock().getMarsClock(); protected static SimulationConfig simulationConfig = SimulationConfig.instance(); protected static SettlementConfig settlementConfig = simulationConfig.getSettlementConfiguration(); protected static PersonConfig personConfig = simulationConfig.getPersonConfig(); // Data members private Transportable transportItem; /** * Constructor. * @param transportItem the transport item to edit. */ public TransportItemEditingPanel(Transportable transportItem) { // Use JPanel constructor super(); // Initialize data members. this.transportItem = transportItem; } /** * Gets the transport item. * @return transport item. */ public Transportable getTransportItem() { return transportItem; } /** * Modifies the transport item with the editing panel information. */ public abstract boolean modifyTransportItem(); /** * Creates the transport item with the editing panel information. */ public abstract boolean createTransportItem(); }
import { isFunction } from '../../misc/is/is-function';
import { IHTMLTemplate } from './template.type';

export function isHTMLTemplate<GTemplateArgument extends object>(
  value: unknown,
): value is IHTMLTemplate<GTemplateArgument> {
  return isFunction(value);
}
<reponame>macguruGithub/spring-io-custom-start-spring-io<gh_stars>0 package io.spring.start.site.extension.dependency.jenkins; import java.io.IOException; import java.nio.file.Files; import java.nio.file.Path; import java.nio.file.Paths; import java.nio.file.StandardCopyOption; import io.spring.initializr.generator.project.contributor.ProjectContributor; public class JenkinsScriptProjectContributor implements ProjectContributor { @Override public void contribute(Path projectRoot) throws IOException { Path jenkinsScriptsBuildImgDirectory = projectRoot.resolve("jenkins/build-img"); Files.createDirectories(jenkinsScriptsBuildImgDirectory); Path jenkinsScriptsPushImgDirectory = projectRoot.resolve("jenkins/push-img"); Files.createDirectories(jenkinsScriptsPushImgDirectory); Path jenkinsScriptsFilepath = projectRoot.resolve("Jenkinsfile"); Files.createFile(jenkinsScriptsFilepath); Path src_jenkinsScriptsFilepath = Paths.get("src/main/resources/config/jenkinsScripts/Jenkinsfile"); Files.copy(src_jenkinsScriptsFilepath, jenkinsScriptsFilepath, StandardCopyOption.REPLACE_EXISTING); Path jenkinsScriptsBuildImgFilepath_build = projectRoot.resolve("jenkins/build-img/build.sh"); Files.createFile(jenkinsScriptsBuildImgFilepath_build); Path src_jenkinsScriptsBuildImgFilepath_build = Paths .get("src/main/resources/config/jenkinsScripts/build-img/build.sh"); Files.copy(src_jenkinsScriptsBuildImgFilepath_build, jenkinsScriptsBuildImgFilepath_build, StandardCopyOption.REPLACE_EXISTING); Path jenkinsScriptsBuildImgFilepath_docker = projectRoot.resolve("jenkins/build-img/Dockerfile"); Files.createFile(jenkinsScriptsBuildImgFilepath_docker); Path src_jenkinsScriptsBuildImgFilepath_docker = Paths .get("src/main/resources/config/jenkinsScripts/build-img/Dockerfile"); Files.copy(src_jenkinsScriptsBuildImgFilepath_docker, jenkinsScriptsBuildImgFilepath_docker, StandardCopyOption.REPLACE_EXISTING); Path jenkinsScriptsBuildImgFilepath_mvm = projectRoot.resolve("jenkins/build-img/mvn.sh"); Files.createFile(jenkinsScriptsBuildImgFilepath_mvm); Path src_jenkinsScriptsBuildImgFilepath_mvm = Paths .get("src/main/resources/config/jenkinsScripts/build-img/mvn.sh"); Files.copy(src_jenkinsScriptsBuildImgFilepath_mvm, jenkinsScriptsBuildImgFilepath_mvm, StandardCopyOption.REPLACE_EXISTING); Path jenkinsScriptsPushImgFilepath = projectRoot.resolve("jenkins/push-img/push.sh"); Files.createFile(jenkinsScriptsPushImgFilepath); Path src_jenkinsScriptsPushImgFilepath = Paths.get("src/main/resources/config/jenkinsScripts/push-img/push.sh"); Files.copy(src_jenkinsScriptsPushImgFilepath, jenkinsScriptsPushImgFilepath, StandardCopyOption.REPLACE_EXISTING); } }
package bundle import ( "archive/tar" "compress/gzip" "encoding/json" "fmt" "io" "io/ioutil" "os" "path/filepath" "strings" "github.com/ghodss/yaml" "github.com/pkg/errors" "github.com/sirupsen/logrus" "k8s.io/helm/pkg/chartutil" "k8s.io/helm/pkg/proto/hapi/chart" "github.com/kyma-project/kyma/components/helm-broker/internal" ) const ( bundleChartDirName = "chart" bundleMetaName = "meta.yaml" bundleDocsMetaPath = "docs/meta.yaml" bundlePlanDirName = "plans" bundlePlanMetaName = "meta.yaml" bundlePlaSchemaCreateJSONName = "create-instance-schema.json" bundlePlanSchemaBindJSONName = "bind-instance-schema.json" bundlePlanSchemaUpdateJSONName = "update-instance-schema.json" bundlePlanValuesFileName = "values.yaml" bundlePlanBindTemplateFileName = "bind.yaml" maxSchemaLength = 65536 // 64 k ) // Loader provides loading of bundles from repository and representing them as bundles and charts. type Loader struct { tmpDir string loadChart func(name string) (*chart.Chart, error) createTmpDir func(dir, prefix string) (name string, err error) log logrus.FieldLogger } // NewLoader returns new instance of Loader. func NewLoader(tmpDir string, log logrus.FieldLogger) *Loader { return &Loader{ tmpDir: tmpDir, loadChart: chartutil.Load, createTmpDir: ioutil.TempDir, log: log.WithField("service", "bundle:loader"), } } // Load takes stream with compressed tgz archive as io.Reader, tries to unpack it to tmp directory, // and then loads it as bundle and Helm chart func (l *Loader) Load(in io.Reader) (*internal.Bundle, []*chart.Chart, error) { unpackedDir, err := l.unpackArchive(l.tmpDir, in) if err != nil { return nil, nil, err } cleanPath := filepath.Clean(unpackedDir) if strings.HasPrefix(cleanPath, l.tmpDir) { defer os.RemoveAll(unpackedDir) } else { defer l.log.Warnf("directory %s was not deleted because its name does not resolve to expected path", unpackedDir) } return l.loadDir(unpackedDir) } // LoadDir takes uncompressed chart in specified directory and loads it. 
func (l Loader) LoadDir(path string) (*internal.Bundle, []*chart.Chart, error) { return l.loadDir(path) } func (l Loader) loadDir(path string) (*internal.Bundle, []*chart.Chart, error) { c, err := l.loadChartFromDir(path) if err != nil { return nil, nil, errors.Wrap(err, "while loading chart") } form, err := l.createFormFromBundleDir(path) if err != nil { return nil, nil, errors.Wrap(err, "while mapping buffered files to form") } if err := form.Validate(); err != nil { return nil, nil, errors.Wrap(err, "while validating form") } yb, err := form.ToModel(c) if err != nil { return nil, nil, errors.Wrap(err, "while mapping form to model") } return &yb, []*chart.Chart{c}, nil } func (l Loader) loadChartFromDir(baseDir string) (*chart.Chart, error) { // In current version we have only one chart per bundle // in future version we will have some loop over each plan to load all charts chartPath, err := l.discoverPathToHelmChart(baseDir) if err != nil { return nil, errors.Wrapf(err, "while discovering the name of the Helm Chart under the %q bundle directory", bundleChartDirName) } c, err := l.loadChart(chartPath) switch { case err == nil: case os.IsNotExist(err): return nil, errors.New("bundle does not contains \"chart\" directory") default: return nil, errors.Wrap(err, "while loading chart") } return c, nil } // discoverPathToHelmChart returns the full path to the Helm Chart directory from `bundleChartDirName` folder // // - if more that one directory is found then error is returned // - if additional files are found under the `bundleChartDirName` directory then // they are ignored but logged as warning to improve transparency. func (l Loader) discoverPathToHelmChart(baseDir string) (string, error) { cDir := filepath.Join(baseDir, bundleChartDirName) rawFiles, err := ioutil.ReadDir(cDir) switch { case err == nil: case os.IsNotExist(err): return "", errors.Errorf("bundle does not contains %q directory", bundleChartDirName) default: return "", errors.Wrapf(err, "while reading directory %s", cDir) } directories, files := splitForDirectoriesAndFiles(rawFiles) if len(directories) == 0 { return "", fmt.Errorf("%q directory SHOULD contain one Helm Chart folder but it's empty", bundleChartDirName) } if len(directories) > 1 { return "", fmt.Errorf("%q directory MUST contain only one Helm Chart folder but found multiple directories: [%s]", bundleChartDirName, strings.Join(directories, ", ")) } if len(files) != 0 { // ignoring by design l.log.Warningf("Found files: [%s] in %q bundle directory. 
All are ignored.", strings.Join(files, ", "), bundleChartDirName) } chartFullPath := filepath.Join(cDir, directories[0]) return chartFullPath, nil } func splitForDirectoriesAndFiles(rawFiles []os.FileInfo) (dirs []string, files []string) { for _, f := range rawFiles { if f.IsDir() { dirs = append(dirs, f.Name()) } else { files = append(files, f.Name()) } } return dirs, files } func (l Loader) createFormFromBundleDir(baseDir string) (*form, error) { f := &form{Plans: make(map[string]*formPlan)} bundleMetaFile, err := ioutil.ReadFile(filepath.Join(baseDir, bundleMetaName)) switch { case err == nil: case os.IsNotExist(err): return nil, fmt.Errorf("missing metadata information about bundle, please check if bundle contains %q file", bundleMetaName) default: return nil, errors.Wrapf(err, "while reading %q file", bundleMetaName) } if err := yaml.Unmarshal(bundleMetaFile, &f.Meta); err != nil { return nil, errors.Wrapf(err, "while unmarshaling bundle %q file", bundleMetaName) } bundleDocsFile, err := ioutil.ReadFile(filepath.Join(baseDir, bundleDocsMetaPath)) if err != nil && !os.IsNotExist(err) { return nil, errors.Wrapf(err, "while reading %q file", bundleDocsMetaPath) } if err := yaml.Unmarshal(bundleDocsFile, &f.DocsMeta); err != nil { return nil, errors.Wrapf(err, "while unmarshaling bundle %q file", bundleDocsMetaPath) } plansPath := filepath.Join(baseDir, bundlePlanDirName) files, err := ioutil.ReadDir(plansPath) switch { case err == nil: case os.IsNotExist(err): return nil, fmt.Errorf("bundle does not contains any plans, please check if bundle contains %q directory", bundlePlanDirName) default: return nil, errors.Wrapf(err, "while reading %q file", bundleMetaName) } for _, fileInfo := range files { if fileInfo.IsDir() { planName := fileInfo.Name() f.Plans[planName] = &formPlan{} if err := l.loadPlanDefinition(filepath.Join(plansPath, planName), f.Plans[planName]); err != nil { return nil, err } } } return f, nil } func (Loader) loadPlanDefinition(path string, plan *formPlan) error { topdir, err := filepath.Abs(path) if err != nil { return err } unmarshalPlanErr := func(err error, filename string) error { return errors.Wrapf(err, "while unmarshaling plan %q file", filename) } if err := yamlUnmarshal(topdir, bundlePlanMetaName, &plan.Meta, true); err != nil { return unmarshalPlanErr(err, bundlePlanMetaName) } if err := yamlUnmarshal(topdir, bundlePlanValuesFileName, &plan.Values, false); err != nil { return unmarshalPlanErr(err, bundlePlanValuesFileName) } if plan.SchemasCreate, err = loadPlanSchema(topdir, bundlePlaSchemaCreateJSONName, false); err != nil { return unmarshalPlanErr(err, bundlePlaSchemaCreateJSONName) } if plan.SchemasBind, err = loadPlanSchema(topdir, bundlePlanSchemaBindJSONName, false); err != nil { return unmarshalPlanErr(err, bundlePlanSchemaBindJSONName) } if plan.SchemasUpdate, err = loadPlanSchema(topdir, bundlePlanSchemaUpdateJSONName, false); err != nil { return unmarshalPlanErr(err, bundlePlanSchemaUpdateJSONName) } if plan.BindTemplate, err = loadRaw(topdir, bundlePlanBindTemplateFileName, false); err != nil { return errors.Wrapf(err, "while loading plan %q file", bundlePlanBindTemplateFileName) } return nil } // unpackArchive unpack from a reader containing a compressed tar archive to tmpdir. 
func (l Loader) unpackArchive(baseDir string, in io.Reader) (string, error) { dir, err := l.createTmpDir(baseDir, "unpacked-bundle") if err != nil { return "", err } unzipped, err := gzip.NewReader(in) if err != nil { return "", err } defer unzipped.Close() tr := tar.NewReader(unzipped) Unpack: for { header, err := tr.Next() switch err { case nil: case io.EOF: break Unpack default: return "", err } // the target location where the dir/file should be created target := filepath.Join(dir, header.Name) // check the file type switch header.Typeflag { // its a dir and if it doesn't exist - create it case tar.TypeDir: if _, err := os.Stat(target); os.IsNotExist(err) { if err := os.MkdirAll(target, 0755); err != nil { return "", err } } // it's a file - create it case tar.TypeReg: if err := l.createFile(target, header.Mode, tr); err != nil { return "", err } } } return dir, nil } func (Loader) createFile(target string, mode int64, r io.Reader) error { f, err := os.OpenFile(target, os.O_CREATE|os.O_RDWR, os.FileMode(mode)) if err != nil { return err } defer f.Close() // copy over contents if _, err := io.Copy(f, r); err != nil { return err } return nil } func yamlUnmarshal(basePath, fileName string, unmarshalTo interface{}, required bool) error { b, err := ioutil.ReadFile(filepath.Join(basePath, fileName)) switch { case err == nil: case os.IsNotExist(err) && !required: return nil case os.IsNotExist(err) && required: return fmt.Errorf("%q is required but is not present", fileName) default: return err } return yaml.Unmarshal(b, unmarshalTo) } func loadPlanSchema(basePath, fileName string, required bool) (*internal.PlanSchema, error) { b, err := ioutil.ReadFile(filepath.Join(basePath, fileName)) switch { case err == nil: case os.IsNotExist(err) && !required: return nil, nil case os.IsNotExist(err) && required: return nil, fmt.Errorf("%q is required but is not present", fileName) default: return nil, errors.Wrap(err, "while loading plan schema") } // OSB API defines: Schemas MUST NOT be larger than 64kB. // See: https://github.com/openservicebrokerapi/servicebroker/blob/v2.13/spec.md#schema-object if len(b) >= maxSchemaLength { return nil, fmt.Errorf("schema %s is larger than 64 kB", fileName) } var schema internal.PlanSchema err = json.Unmarshal(b, &schema) return &schema, errors.Wrap(err, "while loading plan shcema") } func loadRaw(basePath, fileName string, required bool) ([]byte, error) { b, err := ioutil.ReadFile(filepath.Join(basePath, fileName)) switch { case err == nil: case os.IsNotExist(err) && !required: return nil, nil case os.IsNotExist(err) && required: return nil, fmt.Errorf("%q is required but is not present", fileName) default: return nil, err } return b, nil }
/** * Contains commit information about a branch and its base revision. */ class Branch { /** * The commits to the branch */ private final ConcurrentSkipListMap<Revision, BranchCommit> commits; /** * The initial base revision of this branch. */ private final RevisionVector base; /** * The branch reference. */ private final BranchReference ref; /** * Create a new branch instance with an initial set of commits and a given * base revision. The life time of this branch can be controlled with * the {@code guard} parameter. Once the {@code guard} object becomes weakly * reachable, the {@link BranchReference} for this branch is appended to * the passed {@code queue}. No {@link BranchReference} is appended if the * passed {@code guard} is {@code null}. * * @param commits the initial branch commits. * @param base the base commit. * @param queue a {@link BranchReference} to this branch will be appended to * this queue when {@code guard} becomes weakly reachable. * @param guard controls the life time of this branch. * @throws IllegalArgumentException if base is a branch revision. */ Branch(@NotNull SortedSet<Revision> commits, @NotNull RevisionVector base, @NotNull ReferenceQueue<Object> queue, @Nullable Object guard) { checkArgument(!checkNotNull(base).isBranch(), "base is not a trunk revision: %s", base); this.base = base; this.commits = new ConcurrentSkipListMap<Revision, BranchCommit>(commits.comparator()); for (Revision r : commits) { this.commits.put(r.asBranchRevision(), new BranchCommitImpl(base, r.asBranchRevision())); } if (guard != null) { this.ref = new BranchReference(queue, this, guard); } else { this.ref = null; } } /** * @return the initial base of this branch. This is a trunk revision. */ @NotNull RevisionVector getBase() { return base; } /** * Returns the base revision for the given branch revision <code>r</code>. * * @param r revision of a commit in this branch. * @return the base revision for <code>r</code>. * @throws IllegalArgumentException if <code>r</code> is not a commit of * this branch. */ @NotNull RevisionVector getBase(@NotNull Revision r) { BranchCommit c = commits.get(checkNotNull(r).asBranchRevision()); if (c == null) { throw new IllegalArgumentException( "Revision " + r + " is not a commit in this branch"); } return c.getBase(); } /** * Rebases the last commit of this branch to the given revision. * * @param head the new head of the branch. * @param base rebase to this revision. * @throws IllegalArgumentException if head is a trunk revision or base is a * branch revision. */ void rebase(@NotNull Revision head, @NotNull RevisionVector base) { checkArgument(checkNotNull(head).isBranch(), "Not a branch revision: %s", head); checkArgument(!checkNotNull(base).isBranch(), "Not a trunk revision: %s", base); Revision last = commits.lastKey(); checkArgument(head.compareRevisionTime(last) > 0); commits.put(head, new RebaseCommit(base, head, commits)); } /** * Adds a new commit with revision <code>r</code> to this branch. * * @param r the revision of the branch commit to add. * @throws IllegalArgumentException if r is not a branch revision. */ void addCommit(@NotNull Revision r) { checkArgument(checkNotNull(r).isBranch(), "Not a branch revision: %s", r); Revision last = commits.lastKey(); checkArgument(commits.comparator().compare(r, last) > 0); commits.put(r, new BranchCommitImpl(commits.get(last).getBase(), r)); } /** * @return the commits to this branch. 
*/ SortedSet<Revision> getCommits() { return commits.keySet(); } /** * @return <code>true</code> if this branch contains any commits; * <code>false</code> otherwise. */ boolean hasCommits() { return !commits.isEmpty(); } /** * Checks if this branch contains a commit with the given revision. * * @param r the revision of a commit. * @return <code>true</code> if this branch contains a commit with the given * revision; <code>false</code> otherwise. */ boolean containsCommit(@NotNull Revision r) { return commits.containsKey(checkNotNull(r).asBranchRevision()); } /** * Returns the branch commit with the given or {@code null} if it does not * exist. * * @param r the revision of a commit. * @return the branch commit or {@code null} if it doesn't exist. */ @Nullable BranchCommit getCommit(@NotNull Revision r) { return commits.get(checkNotNull(r).asBranchRevision()); } /** * @return the branch reference or {@code null} if no guard object was * passed to the constructor of this branch. */ @Nullable BranchReference getRef() { return ref; } /** * Removes the commit with the given revision <code>r</code>. Does nothing * if there is no such commit. * * @param r the revision of the commit to remove. * @throws IllegalArgumentException if r is not a branch revision. */ public void removeCommit(@NotNull Revision r) { checkArgument(checkNotNull(r).isBranch(), "Not a branch revision: %s", r); commits.remove(r); } /** * Applies all unsaved modification of this branch to the given collection * of unsaved trunk modifications with the given merge commit revision. * * @param trunk the unsaved trunk modifications. * @param mergeCommit the revision of the merge commit. */ public void applyTo(@NotNull UnsavedModifications trunk, @NotNull Revision mergeCommit) { checkNotNull(trunk); for (BranchCommit c : commits.values()) { c.applyTo(trunk, mergeCommit); } } /** * Gets the most recent unsaved last revision at <code>readRevision</code> * or earlier in this branch for the given <code>path</code>. Documents with * explicit updates are not tracked and this method may return {@code null}. * * @param path the path of a node. * @param readRevision the read revision. * @return the most recent unsaved last revision or <code>null</code> if * there is none in this branch. */ @Nullable public Revision getUnsavedLastRevision(Path path, Revision readRevision) { readRevision = readRevision.asBranchRevision(); for (Revision r : commits.descendingKeySet()) { if (readRevision.compareRevisionTime(r) < 0) { continue; } BranchCommit c = commits.get(r); if (c.isModified(path)) { return r; } } return null; } /** * @param rev the revision to check. * @return {@code true} if the given revision is the head of this branch, * {@code false} otherwise. */ public boolean isHead(@NotNull Revision rev) { checkArgument(checkNotNull(rev).isBranch(), "Not a branch revision: %s", rev); return checkNotNull(rev).equals(commits.lastKey()); } /** * Returns the modified paths since the base revision of this branch until * the given branch revision {@code r} (inclusive). * * @param r a commit on this branch. * @return modified paths until {@code r}. * @throws IllegalArgumentException if r is not a branch revision. 
*/ Iterable<Path> getModifiedPathsUntil(@NotNull final Revision r) { checkArgument(checkNotNull(r).isBranch(), "Not a branch revision: %s", r); if (!commits.containsKey(r)) { return Collections.emptyList(); } Iterable<Iterable<Path>> paths = transform(filter(commits.entrySet(), new Predicate<Map.Entry<Revision, BranchCommit>>() { @Override public boolean apply(Map.Entry<Revision, BranchCommit> input) { return !input.getValue().isRebase() && input.getKey().compareRevisionTime(r) <= 0; } }), new Function<Map.Entry<Revision, BranchCommit>, Iterable<Path>>() { @Override public Iterable<Path> apply(Map.Entry<Revision, BranchCommit> input) { return input.getValue().getModifiedPaths(); } }); return Iterables.concat(paths); } /** * Information about a commit within a branch. */ abstract static class BranchCommit implements LastRevTracker { protected final RevisionVector base; protected final Revision commit; BranchCommit(RevisionVector base, Revision commit) { this.base = base; this.commit = commit; } /** * @return the branch base for this branch commit. */ RevisionVector getBase() { return base; } abstract void applyTo(UnsavedModifications trunk, Revision commit); abstract boolean isModified(Path path); abstract Iterable<Path> getModifiedPaths(); protected abstract boolean isRebase(); } /** * Implements a regular branch commit. */ private static class BranchCommitImpl extends BranchCommit { private final Set<Path> modifications = Sets.newHashSet(); BranchCommitImpl(RevisionVector base, Revision commit) { super(base, commit); } @Override void applyTo(UnsavedModifications trunk, Revision commit) { for (Path p : modifications) { trunk.put(p, commit); } } @Override boolean isModified(Path path) { // TODO: rather pass NodeDocument? return modifications.contains(path); } @Override Iterable<Path> getModifiedPaths() { return modifications; } @Override protected boolean isRebase() { return false; } //------------------< LastRevTracker >---------------------------------- @Override public void track(Path path) { modifications.add(path); } @Override public String toString() { return "B (" + modifications.size() + ")"; } } private static class RebaseCommit extends BranchCommit { private final NavigableMap<Revision, BranchCommit> previous; RebaseCommit(RevisionVector base, Revision commit, NavigableMap<Revision, BranchCommit> previous) { super(base, commit); this.previous = squash(previous); } @Override void applyTo(UnsavedModifications trunk, Revision commit) { for (BranchCommit c : previous.values()) { c.applyTo(trunk, commit); } } @Override boolean isModified(Path path) { for (BranchCommit c : previous.values()) { if (c.isModified(path)) { return true; } } return false; } @Override protected boolean isRebase() { return true; } @Override Iterable<Path> getModifiedPaths() { Iterable<Iterable<Path>> paths = transform(previous.values(), new Function<BranchCommit, Iterable<Path>>() { @Override public Iterable<Path> apply(BranchCommit branchCommit) { return branchCommit.getModifiedPaths(); } }); return Iterables.concat(paths); } /** * Filter out the RebaseCommits as they are just container of previous BranchCommit * * @param previous branch commit history * @return filtered branch history only containing non rebase commits */ private static NavigableMap<Revision, BranchCommit> squash(NavigableMap<Revision, BranchCommit> previous) { NavigableMap<Revision, BranchCommit> result = new TreeMap<Revision, BranchCommit>(previous.comparator()); for (Map.Entry<Revision, BranchCommit> e : previous.entrySet()){ if 
(!e.getValue().isRebase()){ result.put(e.getKey(), e.getValue()); } } return result; } //------------------< LastRevTracker >---------------------------------- @Override public void track(Path path) { throw new UnsupportedOperationException("RebaseCommit is read-only"); } @Override public String toString() { return "R (" + previous.size() + ")"; } } final static class BranchReference extends WeakReference<Object> { private final Branch branch; private BranchReference(@NotNull ReferenceQueue<Object> queue, @NotNull Branch branch, @NotNull Object referent) { super(checkNotNull(referent), queue); this.branch = checkNotNull(branch); } Branch getBranch() { return branch; } } }
#include "bml_utilities_distributed2d.h" #include "bml_allocate_distributed2d.h" #include "../bml_introspection.h" #include "../bml_utilities.h" #include "../bml_allocate.h" #include "../bml_parallel.h" #include "../bml_submatrix.h" #include "../bml_copy.h" #include "../ellblock/bml_types_ellblock.h" #include "../ellblock/bml_allocate_ellblock.h" #include "../bml_logger.h" #include <string.h> #include <assert.h> void bml_read_bml_matrix_distributed2d( bml_matrix_distributed2d_t * A, char *filename) { // create a big matrix that can store all the blocks int B_NB; int B_MB; int lnb; int *bsizes; bml_matrix_t *Alocal = bml_get_local_matrix(A); bml_matrix_t *B; switch (bml_get_type(A->matrix)) { // special case for ellblock: we need block sizes to exactly // match block sizes of local matrices case ellblock: lnb = ((bml_matrix_ellblock_t *) Alocal)->NB; B_NB = lnb * A->npcols; B_MB = ((bml_matrix_ellblock_t *) Alocal)->MB * A->npcols; bsizes = bml_noinit_allocate_memory(B_NB * sizeof(int)); for (int p = 0; p < A->npcols; p++) memcpy(bsizes + p * lnb, ((bml_matrix_ellblock_t *) Alocal)->bsize, lnb * sizeof(int)); B = bml_block_matrix_ellblock(A->matrix_precision, B_NB, B_MB, A->M, bsizes, sequential); break; default: B = bml_zero_matrix(bml_get_type(A->matrix), A->matrix_precision, A->N, A->M, sequential); break; } // read data into "big" matrix B by one task only if (A->mpitask == 0) bml_read_bml_matrix(B, filename); bml_mpi_bcast_matrix(B, 0, A->comm); // extract local submatrix out of replicated "big" B matrix int irow = A->myprow * A->n; int icol = A->mypcol * A->n; bml_matrix_t *C = bml_extract_submatrix(B, irow, icol, A->n, A->M / A->npcols); bml_copy(C, A->matrix); bml_deallocate(&B); bml_deallocate(&C); } void bml_write_bml_matrix_distributed2d( bml_matrix_distributed2d_t * A, char *filename) { int B_NB; int B_MB; int lnb; int *bsizes; bml_matrix_t *Alocal = bml_get_local_matrix(A); // task 0 collects all blocks and write matrix if (A->mpitask == 0) { bml_matrix_t *B; // create a big matrix that can store all the blocks switch (bml_get_type(A->matrix)) { // special case for ellblock: we need block sizes to exactly // match block sizes of local matrices case ellblock: lnb = ((bml_matrix_ellblock_t *) Alocal)->NB; B_NB = lnb * A->npcols; B_MB = ((bml_matrix_ellblock_t *) Alocal)->MB * A->npcols; bsizes = bml_noinit_allocate_memory(B_NB * sizeof(int)); // block sizes for "big" matrix made of block sizes // of local matrices for (int p = 0; p < A->npcols; p++) memcpy(bsizes + p * lnb, ((bml_matrix_ellblock_t *) Alocal)->bsize, lnb * sizeof(int)); B = bml_block_matrix_ellblock(A->matrix_precision, B_NB, B_MB, A->M, bsizes, sequential); break; default: B = bml_noinit_matrix(bml_get_type(A->matrix), A->matrix_precision, A->N, A->M, sequential); break; } // assign "local" matrices into "big" matrixe bml_assign_submatrix(B, A->matrix, 0, 0); int C_N = A->n; int C_M = A->M / A->npcols; for (int itask = 1; itask < A->ntasks; itask++) { bml_matrix_t *C = bml_noinit_matrix(bml_get_type(A->matrix), A->matrix_precision, C_N, C_M, sequential); bml_mpi_recv(C, itask, A->comm); int irow = (itask / A->npcols) * C_N; int icol = (itask % A->npcols) * C_N; bml_assign_submatrix(B, C, irow, icol); bml_deallocate(&C); } // print "big" matrix containing all the distributed submatrices bml_write_bml_matrix(B, filename); bml_deallocate(&B); } else { // send local submatrix to task 0 bml_mpi_send(A->matrix, 0, A->comm); } }
import os
import shutil


def find_gt_from_files_copy(img_location, gt_folder, gt_new_folder):
    # Collect image file names and ground-truth file paths, then copy every
    # ground-truth file whose basename matches an image basename.
    files = []
    gt_files = []
    for dirname, dirnames, filenames in os.walk(img_location):
        for filename in filenames:
            files.append(filename)
    print('files = ', files)
    print('len(files) = ', len(files))
    for dirname, dirnames, filenames in os.walk(gt_folder):
        for filename in filenames:
            gt_files.append(dirname + '/' + filename)
    print('gt_files = ', gt_files)
    print('len(gt_files) = ', len(gt_files))
    count = 0
    for file in files:
        for gt_file in gt_files:
            name = file.split('.')[0]
            gt_name = (gt_file.split('.'))[0].split('/')[-1]
            if name == gt_name:
                shutil.copy2(gt_file, gt_new_folder)
                print(gt_file)
                count += 1
    print('count = ', count)
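# A minimal usage sketch (added, not part of the original function): the three
# directory paths are placeholders. The call copies every ground-truth file whose
# basename matches an image basename found under img_location into gt_new_folder.
if __name__ == "__main__":
    find_gt_from_files_copy("data/images", "data/gt", "data/gt_matched")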
package medo.demo.spring.core; @FunctionalInterface public interface TransactionRunnable { void run(); }
// PutState puts the specified `key` and `value` into the transaction's
// writeset as a data-write proposal. PutState doesn't affect the ledger
// until the transaction is validated and successfully committed.
// Simple keys must not be an empty string and must not start with null
// character (0x00), in order to avoid range query collisions with
// composite keys, which internally get prefixed with 0x00 as composite
// key namespace.
//	key := args[0]
//	value := args[1]
func (asc *AsenaSmartContract) PutState(stub shim.ChaincodeStubInterface, args []string) peer.Response {

	asc.Stats.Shared.PutState++

	if len(args) != 2 {
		asc.Stats.Shared.Errors++
		return shim.Error("AsenaSmartContract.PutState(): expecting 2 arguments")
	}

	asc.Log(shim.LogDebug, "AsenaSmartContract.PutState(): called with arguments: %s %s", args[0], args[1])

	m := make(map[string]interface{})
	err := json.Unmarshal([]byte(args[1]), &m)
	if err != nil {
		asc.Stats.Shared.Errors++
		return shim.Error("AsenaSmartContract.PutState(): json.Unmarshal() failed: " + err.Error())
	}

	ValueAsBytes, err := json.Marshal(m)
	if err != nil {
		asc.Stats.Shared.Errors++
		return shim.Error("AsenaSmartContract.PutState(): json.Marshal() failed: " + err.Error())
	}

	err = stub.PutState(args[0], ValueAsBytes)
	if err != nil {
		asc.Stats.Shared.Errors++
		return shim.Error("AsenaSmartContract.PutState(): stub.PutState() failed: " + err.Error())
	}

	asc.Log(shim.LogDebug, "AsenaSmartContract.PutState(): returning success")
	asc.Stats.Shared.Success++
	return shim.Success([]byte(args[0]))
}
import numpy as np


def kalman_filter_detector(spec, spec_std, sig_t, A_0=None, sig_0=None):
    # Scores the spectrum with a Kalman filter: the return value is the
    # log-likelihood of a smoothly drifting signal model (process noise sig_t)
    # minus the log-likelihood of the pure-noise null hypothesis.
    if A_0 is None:
        A_0 = spec.mean()
    if sig_0 is None:
        sig_0 = np.median(spec_std)

    spec = spec - np.mean(spec)
    cur_mu, cur_state_v = A_0, sig_0**2
    cur_log_l = 0
    for i in range(len(spec)):
        cur_z = spec[i]
        cur_spec_v = spec_std[i]**2
        cur_log_l += -(cur_z-cur_mu)**2 / (cur_state_v + cur_spec_v + sig_t**2)/2 - 0.5*np.log(2*np.pi*(cur_state_v + cur_spec_v + sig_t**2))
        cur_mu = (cur_mu / cur_state_v + cur_z/cur_spec_v) / (1/cur_state_v + 1/cur_spec_v)
        cur_state_v = cur_spec_v * cur_state_v / (cur_spec_v + cur_state_v) + sig_t**2
    H_0_log_likelihood = -np.sum(spec**2 / spec_std**2 / 2) - np.sum(0.5*np.log(2*np.pi * spec_std**2))
    return cur_log_l - H_0_log_likelihood
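# A minimal usage sketch (added, not part of the original function): it feeds a
# synthetic noisy spectrum to the detector. A larger return value means the data
# favour a smoothly drifting signal over the pure-noise null hypothesis.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    noise_std = np.full(256, 0.5)                    # per-channel noise estimate
    signal = np.sin(np.linspace(0, 3 * np.pi, 256))  # slowly varying spectrum
    observed = signal + rng.normal(0.0, noise_std)
    print("log-likelihood ratio:", kalman_filter_detector(observed, noise_std, sig_t=0.1))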
/*========================================================================= Program: Visualization Toolkit Module: vtkQuadric.h Copyright (c) Ken Martin, Will Schroeder, Bill Lorensen All rights reserved. See Copyright.txt or http://www.kitware.com/Copyright.htm for details. This software is distributed WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the above copyright notice for more information. =========================================================================*/ // .NAME vtkQuadric - evaluate implicit quadric function // .SECTION Description // vtkQuadric evaluates the quadric function F(x,y,z) = a0*x^2 + a1*y^2 + // a2*z^2 + a3*x*y + a4*y*z + a5*x*z + a6*x + a7*y + a8*z + a9. vtkQuadric is // a concrete implementation of vtkImplicitFunction. #ifndef vtkQuadric_h #define vtkQuadric_h #include "vtkCommonDataModelModule.h" // For export macro #include "vtkImplicitFunction.h" class VTKCOMMONDATAMODEL_EXPORT vtkQuadric : public vtkImplicitFunction { public: vtkTypeMacro(vtkQuadric,vtkImplicitFunction); void PrintSelf(ostream& os, vtkIndent indent); // Description // Construct quadric with all coefficients = 1. static vtkQuadric *New(); // Description // Evaluate quadric equation. double EvaluateFunction(double x[3]); double EvaluateFunction(double x, double y, double z) {return this->vtkImplicitFunction::EvaluateFunction(x, y, z); } ; // Description // Evaluate the gradient to the quadric equation. void EvaluateGradient(double x[3], double g[3]); // Description // Set / get the 10 coefficients of the quadric equation. void SetCoefficients(double a[10]); void SetCoefficients(double a0, double a1, double a2, double a3, double a4, double a5, double a6, double a7, double a8, double a9); vtkGetVectorMacro(Coefficients,double,10); protected: vtkQuadric(); ~vtkQuadric() {} double Coefficients[10]; private: vtkQuadric(const vtkQuadric&); // Not implemented. void operator=(const vtkQuadric&); // Not implemented. }; #endif
def sync_type(ks_name, type_model, connection=None): if not _allow_schema_modification(): return if not issubclass(type_model, UserType): raise CQLEngineException("Types must be derived from base UserType.") _sync_type(ks_name, type_model, connection=connection)
/** * This processor does nothing. It exists to show that one can use beans as * processors. * * @author Michael J. Simons * * @since 2016-12-15 */ @Component public class DefaultResourcePreProcessor implements ResourcePreProcessor { @Override public void process(Resource resource, Reader reader, Writer writer) throws IOException { throw new UnsupportedOperationException("Not supported yet."); //To change body of generated methods, choose Tools | Templates. } }
def execute_clean_step(self, task, step): return execute_step(task, step, 'clean')
<filename>java401codechallenges/src/main/java/java401codechallenges/MultibracketValidation.java<gh_stars>0 package java401codechallenges; import java.util.HashMap; import java.util.HashSet; public class MultibracketValidation { private Stack<Character> stack; public MultibracketValidation() { this.stack = new Stack(); } public boolean multiBracketValidation(String input){ if(input.length() == 1) return false; HashSet<Character> openingBrackets = new HashSet<>(); openingBrackets.add('{'); openingBrackets.add('['); openingBrackets.add('('); HashMap<Character, Character> pairs = new HashMap<>(); pairs.put(']', '['); pairs.put('}', '{'); pairs.put(')', '('); for(int i = 0; i < input.length(); i++) { if(openingBrackets.contains(input.charAt(i))){ stack.push(input.charAt(i)); } else { if(stack.peek() == null || (pairs.containsKey(input.charAt(i)) && !stack.peek().equals(pairs.get(input.charAt(i))))) return false; else if (pairs.containsKey(input.charAt(i)) && stack.peek().equals(pairs.get(input.charAt(i)))) stack.pop(); } } return true; } }
package list import ( "github.com/downflux/game/engine/curve/curve" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" gcpb "github.com/downflux/game/api/constants_go_proto" ) type List struct { curves map[gcpb.EntityProperty]curve.Curve properties []gcpb.EntityProperty } func New(curves []curve.Curve) (*List, error) { l := &List{ curves: map[gcpb.EntityProperty]curve.Curve{}, } for _, c := range curves { propertyType := c.Property() if _, found := l.curves[propertyType]; found { return nil, status.Errorf(codes.FailedPrecondition, "duplicate key %v in list of Curve instances", propertyType) } l.curves[propertyType] = c l.properties = append(l.properties, propertyType) } return l, nil } // Curve returns a Curve instance of a specific mutable property, // e.g. HP or position. // // TODO(minkezhang): Decide if we should return default value. func (l *List) Curve(propertyType gcpb.EntityProperty) curve.Curve { return l.curves[propertyType] } // Properties returns list of entity properties defined in a specific // list. func (l *List) Properties() []gcpb.EntityProperty { return l.properties }
Theoretical modelling of high pressure argon arc radiation A one-dimensional, axisymmetric, high-pressure argon arc was modelled theoretically, and its radiation spectrum was calculated numerically from the equations of heat conductivity and radiation transfer. For a pressure of P = 15 atm, the coefficients of absorption in argon were calculated for the continuum and line spectra. The coefficient of radiative heat conductivity was found, subject to the assumptions that the continuum optical depth is less than unity while the line optical depth is much more than unity. The general heat conductivity was found by neglecting convection, but taking into account radiative conductance and kinetic conductance by ions, electrons and neutral atoms. The radial distributions of the temperature, ionization degree and electrical conductivity were found for arcs with currents I = 880, 990 and 1250 A confined in a transparent tube of radius R = 0.01 m. The degree of ionization was calculated assuming local thermodynamic equilibrium. An effective temperature was calculated in the spectral region of 300–3000 nm. For I = 880 and 1250 A, the effective temperatures were found to be 7500 and 8700 K and the maximum spectral radiant intensities were 7.5×10−8 and 10.5×10−8 W m−2 Hz−1, respectively.
The authors report a familial case of carnitine insufficiency presenting in two out of seven children as a severe, isolated, hypertrophic and hypokinetic cardiomyopathy. The etiology was confirmed by histological study and measurement of carnitine concentrations in the blood and muscle. The response to specific therapy was spectacular. Left ventricular hypokinesia regressed completely within 18 months (fractional fibre shortening increased from 10 to 33% and the SCI from 26 to 55% in the more severe of the two cases). Hypertrophy and dilatation decreased significantly. This is a so-called intermediate form of carnitine insufficiency, very unusual because of the isolated cardiac involvement. These cases underline the value of systematic muscle biopsy with measurement of carnitine concentrations in the investigation of all cases of supposed primary cardiomyopathy, especially as a rapid improvement can be obtained by specific replacement therapy.
import logging

# The original module is assumed to configure its own logger; a standard
# module-level logger is declared here so the per-check messages have a target.
logger = logging.getLogger(__name__)


def onnx_similarity(model_1, model_2):
    graph_1 = model_1.graph
    graph_2 = model_2.graph

    input_same = (len(graph_1.input) == len(graph_2.input))
    if input_same:
        input_1 = {item_1.name: item_1.type for item_1 in graph_1.input}
        input_2 = {item_2.name: item_2.type for item_2 in graph_2.input}
        for name_1, type_1 in input_1.items():
            if name_1 not in input_2:
                input_same = False
            else:
                input_same = input_same and (input_2[name_1] == type_1)
            if not input_same:
                break
    check_flag = 'Pass!' if input_same else 'Fail!'
    logger.info(f'Check input ... {check_flag}')

    node_same = (len(graph_1.node) == len(graph_2.node))
    if node_same:
        for node_1, node_2 in zip(graph_1.node, graph_2.node):
            node_same = node_same and (node_1.op_type == node_2.op_type)
            node_same = node_same and (len(node_1.input) == len(node_2.input))
            node_same = node_same and (len(node_1.output) == len(node_2.output))
            node_same = node_same and (node_1.attribute == node_2.attribute)
            if not node_same:
                break
    check_flag = 'Pass!' if node_same else 'Fail!'
    logger.info(f'Check node ... {check_flag}')

    output_same = (len(graph_1.output) == len(graph_2.output))
    if output_same:
        for item_1, item_2 in zip(graph_1.output, graph_2.output):
            output_same = output_same and (item_1.type == item_2.type)
            if not output_same:
                break
    check_flag = 'Pass!' if output_same else 'Fail!'
    logger.info(f'Check output ... {check_flag}')

    init_same = (len(graph_1.initializer) == len(graph_2.initializer))
    check_flag = 'Pass!' if init_same else 'Fail!'
    logger.info(f'Check length of initializer ... {check_flag}')

    return input_same and output_same and node_same and init_same
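# A minimal usage sketch (added, not part of the original function): two tiny
# identical graphs are built in memory with onnx.helper so the comparison can be
# demonstrated without model files on disk.
if __name__ == "__main__":
    from onnx import TensorProto, helper

    def tiny_model():
        x = helper.make_tensor_value_info("x", TensorProto.FLOAT, [1, 3])
        y = helper.make_tensor_value_info("y", TensorProto.FLOAT, [1, 3])
        node = helper.make_node("Identity", ["x"], ["y"])
        return helper.make_model(helper.make_graph([node], "tiny", [x], [y]))

    print(onnx_similarity(tiny_model(), tiny_model()))  # -> True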
<gh_stars>0 import React from 'react'; import { Card, Space, Typography } from 'antd'; import { ProjProgress, ProjProgressProps } from './ProjProgress'; import { HashRouter, Link } from 'react-router-dom'; import { CampaignData } from '../CampaignPage'; import { useContractCall } from '@usedapp/core'; import { Interface } from '@ethersproject/contracts/node_modules/@ethersproject/abi/lib/interface'; import { abi as CampaignABI } from "../truffleenv/build/contracts/Campaign.json" const campaignIface = new Interface(CampaignABI); export const ProjPreview: React.FC<CampaignData> = ({symbolFirst, currencySymbol, description, title, contract}) => { const contractData = useContractCall({abi: campaignIface, address: contract, method: "getStats", args: []}); const [target, raised, endDate] = contractData ? contractData : [0, 0, 0] return ( <HashRouter> <Link to={'campaigns/'+contract} > <Card style={{ "width": "85%", height: "inherit", "marginLeft": "auto", "marginRight":"auto"}}> <Typography.Title level={3}>{title}</Typography.Title> <Typography.Paragraph ellipsis={{rows: 3}} > {description ? description : "No description provided."} </Typography.Paragraph> <div style={{width: "100%"}}> <Space size="middle" direction="vertical" style={{"width": "inherit", "display": "block"}}> <ProjProgress {...{target, raised, endDate, symbolFirst, currencySymbol} as ProjProgressProps} /> </Space> </div> </Card> </Link> </HashRouter> )};
def main():
    import argparse

    example_doc = """\
examples:
1. Returns plots for all datasets in the original report:
    $ python result.py
2. Only prints results and plots for dataset 2 (for every normalization method):
    $ python result.py -d=2
3. Only prints results and plots for dataset 2 and normalization 1:
    $ python result.py --dataset=2 --norm=1
"""
    parser = argparse.ArgumentParser(
        usage="python %(prog)s [options]",
        description="Performs Linear Regression and Regression Trees algorithms",
        epilog=example_doc,
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    parser.add_argument(
        "-n",
        "--norm",
        choices=[0, 1, 2, 3],
        type=int,
        help="Chooses which normalization to apply (optional). If you choose '0', then "
        "you would apply the minmax scaler. If you choose '1', "
        "then you would apply the z-normalization. If you choose '2', "
        "you would apply the polynomial features scaler first, and then "
        "the minmax scaler. If you choose '3', then you would apply "
        "the polynomial features scaler first, and then the z-normalization "
        "scaler. By default, if no specific case is selected, prints all results.",
    )
    parser.add_argument(
        "-d",
        "--dataset",
        choices=[0, 1, 2],
        type=int,
        help="Decides which dataset to use for reporting results. "
        "Options are %(choices)s (0: winequality-white, 1: winequality-red,"
        " 2: housing)",
    )
    args = parser.parse_args()
    if args.dataset is not None:
        if args.norm is not None:
            mae_tables = test_mae(args.dataset, args.norm)
            for i, model in enumerate(models):
                print("MODEL : " + model)
                print(tabulate(mae_tables[i], headers='keys', tablefmt='psql'))
            test_plot(mae_tables)
        else:
            print("Dataset: ", database.data_base[args.dataset])
            for j in range(4):
                print("Normalization method: ", j)
                mae_tables = test_mae(args.dataset, j)
                for i, model in enumerate(models):
                    print("MODEL: ", model)
                    print(tabulate(mae_tables[i], headers='keys', tablefmt='psql'))
                test_plot(mae_tables)
            test_all_norms(args.dataset)
    else:
        if args.norm is not None:
            print(example_doc)
        else:
            test_explore()
David is speaking tomorrow and Monday at New Living Expo with an all-star lineup — including Rev. Michael Beckwith, Don Miguel Ruiz, Giorgio Tsoukalous, William Henry, Dannion Brinkley, Foster Gamble and Corey Goode. We have received major new intel updates from Pete Peterson on the Antarctic Atlantis. We had hoped to get it properly written up for you, with all the ancillary research data, before this trip but we just need a little more time! There are many exciting developments going on behind the scenes. We have never been more busy, nor more inspired! [PLEASE NOTE: We are currently having a weird text size glitch. Use the buttons at the top of this screen to set it to the size you want. Thanks!] STUNNING NEW BRIEFINGS JUST CAME IN I wish I had time to properly write this up before heading out to San Francisco for the New Living Expo — but I have hit the proverbial brick wall here in terms of timing. I will do the best I can to give you an initial overview. Pete Peterson has finally been authorized to release far more intel than ever before. Something has changed on the inside to make this possible. The current situation in the world may seem very hopeless and muddled, when in fact we appear to be right on the brink of incredible new developments. I am going to do the classic meta-analysis complete with all the research links and insider data for you to read and enjoy soon enough. I just ran out of time before heading out tomorrow morning by car. We also have one hell of a Santa Ana windstorm going on here in the Topanga mountains at the moment, adding great drama to this writing process. Pete gave me a stunning depth of new information on excavations of UFO crashes in Antarctica that were already being thoroughly analyzed by teams of scientists when he got there 30 years ago. The data was so dense it was almost too much. This is what happens when you have “eggheads” dedicating their entire lives to studying how a gigantic, miles-wide mothership skidded along the ground before coming to a stop. FOUR LAYERS DEEP In short, if we combine the latest intel from Corey Goode with what Pete Peterson was aware of from his work there 30 years ago, we appear to have four layers of awesome stuff in Antarctica that we will soon be hearing about in an official disclosure. These layers are spread out in a total of about three and a half miles of vertical space, from bottom to top. The first, oldest, bottom layer is from the Ancient Builder Race. It could be 1.8 billion years old, if not more. It is a vast, underground facility inside the continental landmass of what is now Antarctica, beneath the ice. There are ancient underground bases like this all throughout planets and moons in our solar system, as well as several neighboring ones. This was a vast civilization. That doesn’t make their ruins on Earth any less impressive. Antarctica is the main “X marks the spot” treasure room for Earth. On the surfaces of planets and moons they left behind crystalline pyramids, obelisks and domes made of a transparent aluminum alloy. Most of the surviving remains like this are very thoroughly eroded, creating what one insider called a “Cosmic Junkyard.” The build-outs inside planets and moons have been preserved much more efficiently. They contain super-advanced technology that is transdimensional, and still works. This is what allowed various beings to put themselves into stasis and fast-forward through tens, if not hundreds of thousands of years of time. 
THE SECOND LAYER The second layer contains a huge mothership that crash-landed on tropical-looking trees on the surface of the Antarctic continent, as it would look under the ice. Crushed remains of palm, pineapple and other such trees can still be found beneath this gigantic and highly impressive craft — way ahead of what we have in our current, ‘accepted’ technology. The science breakthroughs we will get from analyzing and reverse-engineering what is on this ship will give us an incredible leap forward in technology. This ship is now buried beneath over three miles of glacial ice. When it crash-landed on the Antarctic continent, it was all tropical land at the time. The mothership was estimated to be about 450,000 years old as of 30 years ago, when Pete was there studying it along with many other scientists. THE MARS CIVILIZATION According to Corey’s data, it is now believed to be a little over 500,000 years old. This dates it to the time immediately before the civilization that had lived on Mars, when it was still Earth-like, was destroyed. NASA announced that Mars was like Earth, and had an ocean that was up to a mile and a half deep across half of its surface, just a few years ago. Few people have talked about it even though the announcement spread far and wide at the time. As several insiders have revealed, Mars was a moon of a Super-Earth during this time. The Super-Earth exploded in a great war, becoming the Asteroid Belt. Remember… if this sounds crazy to you, I just wrote an entire book summarizing all my research into this far-reaching insider testimony. THE ANTARCTIC ATLANTIS This was all discussed in my new video The Antarctic Atlantis, which you should definitely check out if you haven’t already seen it. At the time of this writing, the video is at a little over 530,000 views and the channel has 91,000 subscribers. So close! Please subscribe to the youtube.com/davidwilcock333 channel if you haven’t already done so, as we will have far more access to YouTube staff once we cross the 100K line. Within another month or two, we will significantly increase the amount of video content we will be putting out this way, including robust live shoots with multiple camera angles and visual support. Your subscription guarantees you won’t miss any new releases once they come out. This has taken a great deal of time, effort and investment to prepare for, and we are finally just about ready to launch. It took me a solid month of full-time work to get this video to the point where I was happy enough to release it: TOTALLY PHAT, DUD3! I had gained a lot of winter weight during this time that I have since predominantly lost. I majorly stepped up my work on body-building since seeing some of the avalanche of fat-shaming comments that came in from this video. I wish I could say “I don’t give a crap” about all the mountains of hate that people write. I have learned not to mind it as much, but I still have feelings. Other than occasional pull-ups that would lead to neck injuries, I really wasn’t doing any kind of weightlifting up until I was publicly humiliated for this video. I estimate within a few more months the difference will be quite noticeable, based on how things are already changing. I like how it looks and how I feel. I set a goal “to build upper-body strength” in a high-school program called Discovery nearly 30 years ago, and I am finally doing it. 
NOT THE GOVERNMENT, JUST AFTER EFFECTS As a humorous aside, some people have said “the government” must be paying me and giving me support to have made this video, due to its production value. Not! In a roundabout way, I am flattered. The truth is far less dramatic. I have worked my butt off for three years to learn how to program and render After Effects templates that are customizable. That’s a lot of time spent sitting in a chair. All day. Every day. Look at what happens when you do! You pick the templates that you like and add your own images and text in. It can be hard work, but once you learn After Effects it is definitely doable. Most of these templates cost somewhere between 28 to 50 bucks, and look amazing. You can get matching cinematic music for another 10 to 12 bucks. A majority of them are so-so, but if you are really patient you can find diamonds in the rough. THE PRICE YOU’VE GOTTA PAY The cost is very cheap once you acquire the skills to use these templates. The time it takes to become knowledgeable enough to make videos out of them is extremely costly. There is no way to “halfway” learn After Effects. You either have to learn the whole damn thing, stem to stern, or you will be hopelessly lost. That takes years. I had a much stronger CG expert who is a friend of mine help me with two of the templates: the scroll that says “THE ANTARCTIC ATLANTIS” at the beginning and the “solar flash” animation at the end, which we custom-built from scratch. We would have done more, but only a few days after we started working together, he got a full-time job and had to take it. Neither of those sequences would have looked right with the skills I have now. That’s where team-building and affiliation becomes very useful. THE EMPIRE Since George Lucas was authorized to release a wealth of true intel in the Star Wars series, gleaned from research that was already well underway in the Secret Space Program, I call this ancient Mars civilization The Empire. All of this is discussed in my new book The Ascension Mysteries. The first half is a personal history of my childhood and teenage years, answering a widespread request to learn more about my bizarre and interesting past. The vast majority of written reviews are five stars, remarkably enough. A few people are horrified that I talk about myself in the first half, and act like everyone agrees with them. Narcissist! Ego! Migod! If you have trouble with it, just skip ahead to chapter 13. The rest will be there if you change your mind later on. They are really akin to two different books. Many reviews say the second half of the book is more than worth the purchase price alone. I DID NOT WANT TO WRITE ABOUT THE PERSONAL STUFF I wrote this once before, but it seems that no one read it. Here it is: I did not want to write about my personal life. At all. This was not fun. I had massive, sweating panic attacks from putting my personal information out there like this. I had multiple dreams that ordered me to do this, saying it was of critical importance in helping others understand the Ascension process. This is not narcissism or ego. At all. This is baring my soul on the sacrificial altar and holding nothing back, knowing the malevolence of that very vocal one percent of readers. I do my best to maintain perspective and understand that from 1998 to 2003, I personally contacted every single person who wrote hate about me online. In all but one case, I was able to turn them into my friends by contacting them directly. 
That one case was so upsetting that I had to discontinue the practice. Sadly, I could not even begin to attempt an operation like this today. The demand for personal contact is vastly greater than I could ever hope to meet. A COSMIC HISTORY OF OUR SOLAR SYSTEM The second half of The Ascension Mysteries explores all the insider data I encountered about UFOs, beginning in my sophomore year of college and continuing right straight through to the present. It is essentially a cosmic history of our solar system, dense with scholarship and research links in the second half. I am pleased with how it turned out. In case you missed the announcement, the UK / Australia / NZ / S Africa version is now available! LET’S NOT INTERRUPT THE FLOW… Instead of hating people in this field, let’s do our best to be supportive of each other — even if we do not agree on everything. We may strongly dislike the Cabal — for very valid reasons — but one thing we cannot deny is that they got where they are through an incredibly vast team effort. Here we are in this field, each fighting over their own little tract of land — while the readers and viewers are equally apt to bicker with one another. The Alliance is very real. They were preparing to take decisive action against the Cabal, regardless of who won the 2016 US election. The current president is not running the show in some sort of weird dictatorship fashion, contrary to media projections. Many seemingly mysterious things will make sense once the truth comes out. I am not saying that I like or support everything that is going on. Not at all. However, I also try to look for the positive in every situation. Yet, you open yourself up to awesome waves of hatred to even try to say one thing positive about this situation — at least from certain readers. ALLIANCE VERSUS CABAL There are many tangible signs that a visible war is playing out between the DoD / Alliance and the CIA / corporate media / Cabal factions in America right now. The Alliance is definitely going to win this. It is a mathematical certainty. They have much greater numbers and they are growing by the day. The “Vault 7” CIA Wikileaks documents were supposed to be the “Next Snowden,” but that story vanished within one or two days. Why would the CIA be doing the same surveillance as the NSA? Simply put, this is another tool the Cabal has been using to smash any dissenters, would-be whistleblowers or Alliance members. Expect far more to come out about how this data has been used for blackmail, bribery and other tools the Cabal used to maintain control. LET’S NOT BE QUICK TO ATTACK THE ALLIANCE It is easy for us to be “Monday Morning Quarterbacks” and complain about the Alliance not doing enough. Real people — brave warriors — are dying in ever-increasing numbers as this war reaches its crescendo. They deserve our respect, honor and support. They are putting their lives on the line while the rest of us safely argue with each other about whether they even exist, or are doing enough. All the Cabal is doing now is trying to delay the inevitable. They know they are finished. They are just refusing to surrender until the very bitter end. They have already thrown the timetables the Alliance had hoped to meet way off, but they can’t hold on forever. LET’S REVIEW THE FIRST TWO LAYERS IN ANTARCTICA Earlier rounds of the Alliance’s work have included a massive sweep of underground bases worldwide, purging the Cabal’s holdouts in multiple locations. 
The Alliance is now taking out the last-remaining Cabal proxy armies in the outside, visible world. This is scaring and pissing off a lot of people. Once the Cabal’s ability to conduct “false flag” military strikes is sufficiently destroyed, expect a raft of shocking disclosures to emerge. I have no idea in what order we will get all the Antarctica data. I do apparently have a fairly decent idea of what there is to know. The bottom layer we are excavating and exploring is a super-advanced underground base beneath the continental crust that is 1.8-plus billion years old. The second layer is a huge UFO mothership crash on the surface of the continental crust, on top of tropical vegetation, from about 500,000 years ago. The Empire was aware of the incredible resources available down there. Other ships landed in this same area to explore it as well. THE THIRD LAYER The third layer contains a ship that crashed more recently, trying to land above the original ship and ancient underground base. This ship is roughly two miles above the previous ship. In this case it landed on snow, leaving a long debris trail of smashed parts. The original ship had long since been buried under two miles of glacial ice. It was a forgotten ruin until the survivors of the Empire desperately needed it. Antarctica still had some habitable land on it during this time, but the ship crashed on snow-covered mountains in the highlands, directly above the older ship. Peterson was hired to find out why these ships were crashing instead of landing smoothly. His conclusion was that they were using magnetic propulsion and were disrupted by unique conditions in the polar region. Although pole shifts did take place, this area apparently still remained predominantly polar. THE PRE-ADAMITES During Peterson’s time it was estimated that this crash took place between 220 and 100 thousand years ago. Based on Corey’s intel, it seems that the consensus is now that this second crash happened 55,000 years ago. This was the group that had elongated skulls and became known as the Pre-Adamites. They were desperate. They tunneled down through two miles of ice to access the original ship, as it was their last chance. They were in survival mode. They were able to recover much of their lost technology and return their original ship to a semi-operational state. They then cannibalized much of these ships and used the parts to build a new base to operate from on the surface of the ice. The motherships became havens, and they were too damaged to be able to fly away again. Peterson noted that the symbols found inside the first ship matched nicely with the more recent one. Both appear to be from the same culture, though the second ship shows signs that their technology had become further advanced. THE FOURTH AND FINAL LAYER The Pre-Adamite base seems to have survived and thrived from 55,000 to about 12,500 years ago. This was when the earth shifted yet again on its axis, driving the continent even further into the polar region, and freezing out the rest of it. The second ship was buried under another mile or so of ice. Its secrets were entombed, awaiting our own modern technology to uncover it. According to Peterson, another far more recent ET group attempted to land there and access this site. This was estimated to be 800 years ago. Other intel suggests it could be about 2000 years since this happened. This ship is far smaller and is more akin to the type we see at Roswell. It was nearly on the surface of the ice and was the first one we found. 
Again, the very powerful magnetic fluctuations in the polar region seem to account for why all of these ships crashed instead of making smooth landings. THAT’S IT! OK… that’s as much as I can do for now on the data. There are many more specifics than what I have covered here so far. If I don’t go to bed soon I won’t be in good shape for the conference. I hope to do a new video about all of this soon enough. It is very, very interesting stuff. Some of the new data will make it into the conferences as well. If this does get disclosed, the world will change like never before. It will be the most significant event in modern history. One last thing Pete shared is that a malevolent ET group, probably the Draco, is now demanding disclosure. They want us to know that they ‘control’ the earth. This is no longer the case — and that may be why they are freaking out. Things are about to get very interesting and the signs are already visible. NEW SEASON OF ANCIENT ALIENS STARTED TONIGHT (FRIDAY!) To round this out, I am happy to report that a new season of Ancient Aliens just started tonight. Traditionally April is always a launch month for new seasons. Here’s the link to the Season 12 promo, which features three clips of yours truly: http://www.history.com/shows/ancient-aliens/videos/ancient-aliens-new-season-preview?playlist_slug=ancient-aliens-season-12-preview Hang on a minute… GIGANTIC BLOCKS OF STONE!!! OK… I feel much better now. If you are hearing this in a robo voice, you probably did not get that joke. Please do your part and report the fraud to YouTube. We have scored huge victories against video fraud, as we will discuss in future updates. The Teddy Bear was defeated by Adpocalypse. “David Wilcock” no longer earns ad revenues on YouTube thanks to it being considered Fake News. Hooray! The UFOSWLG YouTube channel is still a heavy perpetrator of copyright infringement. If you own this site, we do not grant permission for our copywritten content to be used. It is much better for you to make your own videos where you respond to and discuss this information instead of plagiarizing it. ENTIRE EPISODES HAVE BEEN INSPIRED As you may know, I am a Consulting Producer on Ancient Aliens due to the significant amount of research data I contribute — sometimes leading to entire episodes built around that content. Traditionally we shoot 12 episodes per season, and every Friday a new one will air — starting tonight. The Secret Space Program has made it into at least one of the new episodes this season. That episode will nicely establish the back-story for what is going on with provable data. THREE NEW SHOWS A WEEK When you include Wisdom Teachings and Cosmic Disclosure on Gaia, I will be featured in three new TV shows for you to enjoy per week for the next three months. That’s still not as intense as people running daily or nightly shows during the week — but considering the nature and depth of the content it is awesome! I have been involved in a major, months-long scientific proof of an imminent, DNA-changing solar flash event on Wisdom Teachings — so if you haven’t seen it in a while, I highly recommend tuning in. Cosmic Disclosure continues to bring new insiders forward as well, and our production values are getting better and better as Gaia adds new resources. NEW LIVING EXPO IN SAN FRANCISCO Lastly, I only have two events scheduled for the remainder of this year, despite crushing, almost daily demand for public appearances. 
I already travel so much that I am striving for more time at home, more stability and more continuity. I definitely work as much as I can with what I have.
The first event is New Living Expo, this Sunday and Monday. The second — and last — is Contact in the Desert, coming up on the third weekend of May.
This weekend, I am speaking directly after Corey Goode on Sunday, from 5PM to 7PM in Room 6. Corey's talk is from 2:30 to 4:30 in the same room.
We are just two of the speakers in the overall lineup, which includes Rev. Michael Beckwith, Don Miguel Ruiz, Giorgio Tsoukalous, William Henry, Dannion Brinkley, Foster Gamble, Barbara Marx Hubbard, Daniel Pinchbeck, Deborah King, and Gail Thackray among others.
My talk will focus on the Solar Flash that is expected to happen some time in our near future. The best estimates now seem to be positioning it as occurring three to five years from now. No one knows for sure.
I will be taking many of the exciting new research threads I have been exploring on Wisdom Teachings and weaving them together into a single two-hour presentation of the facts.
You can click here to read a much fuller description of what this event will entail. Bear in mind that the crowd for this is huge, and if you want to get a chance to meet me in person, the Monday event is far more likely to work.
THE MONDAY EVENT: ASCENSION MASTERY — PREPARING THE WAY
My Monday event begins at 6:30 PM, so you don't have to miss work to see the show. In this case I will be talking about what we need to do to prepare for this stunning event, if it is indeed going to happen — and I believe it definitely will.
This will be a three-hour tour-de-force of science and spirituality, geared towards making sure your boarding passes are printed and stamped!
Your support of these events ensures that we can continue progressing towards solutions that will release a lot more content free of charge in the near future.
ADPOCALYPSE WASN'T SO BAD…
None of our videos can earn ad revenues on YouTube or even have ads in them — despite Gaia being ready and willing to pay for those slots.
This is the side-effect of the "Fake News" boycott where the Cabal is de-monetizing anything they see as opposition.
YouTubers are calling this "Adpocalypse" and it has ruined the careers of hundreds of thousands, if not millions of people.
This forces people to start selling products in order to survive — the very thing that makes commenters attack them even worse.
I anticipated something like this all along and have honestly never made a cent from YouTube ads. Not once. Any monetized videos were bootlegs.
I am very happy that the "Teddy Bear" I wrote about in December has lost the financial incentive to steal and re-post my content on an industrial scale.
Now when I make a video, it should be seen by far more people since it won't be drowned out in an avalanche of old, recycled and misleading content.
There are still a few Ted channels left, including this one, "Reptilian Agenda." Please report them when you find them.
Your support makes all of this possible, and I thank you! Soon I will be in San Francisco, ready to git 'er done!
class Job: """Azure Quantum Job that is submitted to a given Workspace. :param workspace: Workspace instance to submit job to :type workspace: Workspace :param job_details: Job details model, contains Job ID, name and other details :type job_details: JobDetails """ def __init__(self, workspace: "Workspace", job_details: JobDetails): self.workspace = workspace self.details = job_details self.id = job_details.id self.results = None def refresh(self): """Refreshes the Job's details by querying the workspace.""" self.details = self.workspace.get_job(self.id).details def has_completed(self): return ( self.details.status == "Succeeded" or self.details.status == "Failed" or self.details.status == "Cancelled" ) def wait_until_completed(self, max_poll_wait_secs=30): """Keeps refreshing the Job's details until it reaches a finished status.""" self.refresh() poll_wait = 0.2 while not self.has_completed(): logger.debug( f"Waiting for job {self.id}," + f"it is in status '{self.details.status}'" ) print(".", end="", flush=True) time.sleep(poll_wait) self.refresh() poll_wait = ( max_poll_wait_secs if poll_wait >= max_poll_wait_secs else poll_wait * 1.5 ) def get_results(self): if self.results is not None: return self.results if not self.has_completed(): self.wait_until_completed() if not self.details.status == "Succeeded": raise RuntimeError( f'{"Cannot retrieve results as job execution failed"}' + f"(status: {self.details.status}." + f"error: {self.details.error_data})" ) url = urlparse(self.details.output_data_uri) if url.query.find("se=") == -1: # output_data_uri does not contains SAS token, # get sas url from service blob_client = BlobClient.from_blob_url( self.details.output_data_uri ) blob_uri = self.workspace._get_linked_storage_sas_uri( blob_client.container_name, blob_client.blob_name ) payload = download_blob(blob_uri) else: # output_data_uri contains SAS token, use it payload = download_blob(self.details.output_data_uri) result = json.loads(payload.decode("utf8")) return result def matches_filter( self, name_match: str = None, status: Optional[JobStatus] = None, created_after: Optional[datetime] = None ) -> bool: """Checks if job (self) matches the given properties if any. :param name_match: regex expression for job name matching :param status: filter by job status :param created_after: filter jobs after time of job creation """ if name_match is not None and re.search(name_match, self.details.name) is None: return False if status is not None and self.details.status != status.value: return False if created_after is not None and self.details.creation_time.replace(tzinfo=timezone.utc) < created_after.replace(tzinfo=timezone.utc): return False return True @staticmethod def create_job_id() -> str: """Create a unique id for a new job.""" return str(uuid.uuid1())
Huge Variety of Nuclides That Arise in the LENR Processes: Attempt at Explanation LENR studies have shown a wide variety of manifestations of this phenomenon. It manifests itself in metals with hydrogen dissolved in them, in plasma, in gas discharge, in electrolysis, and even in biological systems. In addition to energy release, which far exceeds the capabilities of chemical reactions, LENR is characterized by a huge variety of emerging chemical elements. The report provides examples of the appearance of many initially missing elements in different LENR installations. For example, in the nickel-hydrogen LENR reactor created in our laboratory, which worked for 7 months, Ca, V, Ti, Mn, Fe, Co, Cu, Zn, Ga, Ba, Sr, Yb, Hf were found. Moreover, the appearance of new elements is found not only in the "fuel" but also in the surrounding matter. The huge variety of chemical elements that arise can be explained by the fact that in the processes of LENR, the interaction covers several atoms at once. This can be an interaction initiated by neutrinos (antineutrinos) of very low energies, since such particles have a de Broglie wavelength (the size of the interaction region) much larger than the interatomic distances in condensed matter. Huge fluxes of neutrino-antineutrino pairs are generated in metals and dense plasmas by thermal collisions of electrons with atoms at a sufficiently high temperature. Another possible agent that causes collective nuclear transmutations is probably the light magnetic monopole (the magnetically excited state of neutrinos).
/* * Copyright (c) 2014, Facebook, Inc. * All rights reserved. * * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. An additional grant * of patent rights can be found in the PATENTS file in the same directory. * */ #pragma once #include <proxygen/lib/http/codec/compress/HPACKConstants.h> #include <proxygen/lib/http/codec/compress/HeaderTable.h> #include <proxygen/lib/http/codec/compress/StaticHeaderTable.h> namespace proxygen { class HPACKContextImpl { public: static bool isStatic(uint32_t index, size_t staticTableSize) { return index < staticTableSize; } static uint32_t globalToDynamicIndex(uint32_t index, size_t staticTableSize) { return index - staticTableSize; } static uint32_t globalToStaticIndex(uint32_t index) { return index; } static uint32_t dynamicToGlobalIndex(uint32_t index, size_t staticTableSize) { return index + staticTableSize; } static uint32_t staticToGlobalIndex(uint32_t index) { return index; } static uint32_t getIndex(const HPACKHeader& header, const HeaderTable& staticTable, const HeaderTable& dynamicTable) { uint32_t index = staticTable.getIndex(header); if (index) { return staticToGlobalIndex(index); } index = dynamicTable.getIndex(header); if (index) { return dynamicToGlobalIndex(index, staticTable.size()); } return 0; } static uint32_t nameIndex(const std::string& name, const HeaderTable& staticTable, const HeaderTable& dynamicTable) { uint32_t index = staticTable.nameIndex(name); if (index) { return staticToGlobalIndex(index); } index = dynamicTable.nameIndex(name); if (index) { return dynamicToGlobalIndex(index, staticTable.size()); } return 0; } }; }
<reponame>zhangzhen1979/JIMU-ConvertVideo<gh_stars>1-10
package com.thinkdifferent.convertvideo.consumer;

import com.thinkdifferent.convertvideo.config.RabbitMQConfig;
import com.thinkdifferent.convertvideo.service.ConvertVideoService;
import com.thinkdifferent.convertvideo.task.Task;
import net.sf.json.JSONObject;
import org.springframework.amqp.rabbit.annotation.RabbitListener;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;

@Component
public class ConvertVideoConsumer {

    @Autowired
    private Task task;
    @Autowired
    private ConvertVideoService convertVideoService;

    /**
     * Queue consumer for converting MP4 files. Starts a multi-threaded task to
     * process the messages taken from the queue.
     *
     * @param strData JSON string placed on the queue
     */
    @RabbitListener(queues = RabbitMQConfig.QUEUE_RECEIVE)
    public void receiveTodoRequestByMap(String strData){
        try{
            if(RabbitMQConfig.consumer){
                JSONObject jsonData = JSONObject.fromObject(strData);
                task.doTask(convertVideoService, jsonData);
//                Thread.currentThread().join();
            }
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

}
import string def solve(): alp = string.ascii_lowercase mydict = dict(zip(alp, [False] * len(alp))) s = input() + '#' prev = '#' cnt = 0 for c in s: if c != prev: if cnt % 2 == 1: mydict[prev] = True prev = c cnt = 0 cnt += 1 for c in mydict: if mydict[c]: print(c, end = '') print() t = int(input()) while t > 0: solve() t -= 1
def _simple_phrase(self, key, quoted=True): if not type(key) == str: key = key[0] if self[key] == None: return for unquoted in ['retention', 'size', 'bytes', 'address', bacula_tools.COMMENT]: if unquoted in key.lower(): quoted = False try: int(self[key]) value = self[key] except: if quoted: value = '"' + self[key].strip() + '"' else: value = self[key] if key.lower() == bacula_tools.COMMENT: value = '# %s' % value self.output.insert(-1, '%s%s = %s' % (self.prefix, key.capitalize().replace('_', ' '), value)) return
def contains_non_vegan_ingredients(ingredients_to_check): ingredients_to_check = [i.strip().lower().replace(" ", "") for i in ingredients_to_check] non_vegan_list = [i.lower().strip().replace(" ","") for i in get_non_vegan_list()] return [i for i in ingredients_to_check if i in non_vegan_list]
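# A minimal usage sketch (added, not part of the original function):
# get_non_vegan_list() lives elsewhere in this module, so a hypothetical
# stand-in is defined here to keep the example self-contained and runnable.
if __name__ == "__main__":
    def get_non_vegan_list():  # stand-in for the real data source
        return ["Honey", "Gelatin", "casein"]

    print(contains_non_vegan_ingredients(["Oat Milk", " Honey ", "gelatin"]))
    # -> ['honey', 'gelatin']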
<filename>base/src/main/java/com/yxlg/base/util/FileUtil.java
/*
 * FileUtil.java
 *
 * Created Date: 2016-11-15
 *
 * Copyright (c) Yuandian Technologies Co., Ltd.
 *
 * This software is the confidential and proprietary information of
 * Yuandian Technologies Co., Ltd. ("Confidential Information"). You shall not
 * disclose such Confidential Information and shall use it only in accordance
 * with the terms of the license agreement you entered into with
 * Yuandian Technologies Co., Ltd.
 */
package com.yxlg.base.util;

import java.io.File;
import java.io.FileOutputStream;
import java.io.InputStream;

/**
 * @author Michael.Sun
 *  2016-11-15
 * @version <br>
 *          <p>File handling utility class</p>
 */
public class FileUtil {

    /**
     * Create a file from the given InputStream.
     * @param inputStream
     * @param fileName
     *          the full path of the file to create
     * @return
     */
    public static File createFileThroughInputStream(InputStream inputStream , String fileName){
        FileOutputStream fileOutputStream;
        try {
            File file = new File(fileName);
            fileOutputStream = new FileOutputStream(file);
            int temp = 0;
            while((temp = inputStream.read()) != -1){
                fileOutputStream.write(temp);
            }
            fileOutputStream.flush();
            fileOutputStream.close();
            return file;
        } catch (Exception e) {
            throw new BusinessException("Failed to create file", e);
        }
    }
}
<gh_stars>0 use std::net::IpAddr; use std::time::SystemTime; // Poll code use std::time::Duration; use tallystick::schulze::SchulzeTally; use tallystick::schulze::Variant; use tallystick::RankedCandidate; use crate::error::ErrorKind; #[derive(Debug, PartialEq, Clone)] pub struct RankedChoiceVote { /// idx 0 is 1st choice, etc pub ranked_choices: Vec<String>, pub voter_ip: IpAddr, } #[derive(Debug, PartialEq, Clone, Copy)] pub enum VotingMethod { Schulze, } #[derive(Debug, PartialEq, Clone)] pub struct Poll { pub id: String, pub title: String, pub description: String, pub candidates: Vec<String>, /// Seconds since the Epoch pub creation_time: u64, /// Seconds since the Epoch pub end_time: u64, pub votes: Vec<RankedChoiceVote>, pub num_winners: usize, pub winners: Option<Vec<RankedCandidate<String>>>, pub method: VotingMethod, pub prohibit_double_vote_by_ip: bool, } impl Poll { pub fn new( id: Option<String>, title: String, description: String, candidates: Vec<String>, length: Duration, num_winners: usize, prohibit_double_vote_by_ip: bool, ) -> Result<Self, ErrorKind> { let id = id.unwrap_or_else(|| format!("{:016x}", rand::random::<u64>())); let creation_time = SystemTime::now() .duration_since(SystemTime::UNIX_EPOCH) .expect("could not get system time") .as_secs(); let end_time = creation_time + length.as_secs(); Ok(Self { id, title, description, candidates, creation_time, end_time, votes: Vec::new(), num_winners, winners: None, method: VotingMethod::Schulze, prohibit_double_vote_by_ip, }) } /// Finds the winners pub fn find_winners(&self) -> Result<Vec<RankedCandidate<String>>, ErrorKind> { let winners = match self.method { VotingMethod::Schulze => { let mut tally = SchulzeTally::<String, u64>::new(self.num_winners, Variant::Winning); for candidate in &self.candidates { tally.add_candidate(candidate.clone()); } for vote in &self.votes { tally.add(&vote.ranked_choices)?; } tally.winners().into_vec() } }; Ok(winners) } pub fn finish(&mut self) -> Result<(), ErrorKind> { let winners = self.find_winners()?; self.winners = Some(winners); Ok(()) } } #[cfg(test)] mod tests { use super::*; #[test] fn autogen_random_id() { let poll1 = Poll::new( None, "".to_string(), "".to_string(), vec![], Duration::from_secs(1), 1, false, ); let poll2 = Poll::new( None, "".to_string(), "".to_string(), vec![], Duration::from_secs(1), 1, false, ); // Has a 1/(2^64 - 1) chance of failing when there is no bug. // This is effectively negligible. 
assert_ne!(poll1.unwrap().id, poll2.unwrap().id); } #[test] fn support_custom_id() { let id = "custom_id".to_string(); let poll = Poll::new( Some(id.clone()), "".to_string(), "".to_string(), vec![], Duration::from_secs(1), 1, false, ) .unwrap(); assert_eq!(poll.id, id); } #[test] fn winners() { let a = String::from("a"); let b = String::from("b"); let c = String::from("c"); let mut poll = Poll::new( None, "".to_string(), "".to_string(), vec![a.clone(), b.clone(), c.clone()], Duration::from_secs(1), 1, false, ) .unwrap(); poll.votes.push(RankedChoiceVote { ranked_choices: vec![c.clone(), a.clone(), b.clone()], voter_ip: "127.0.0.1".parse().unwrap(), }); poll.votes.push(RankedChoiceVote { ranked_choices: vec![a.clone(), c.clone(), b.clone()], voter_ip: "127.0.0.2".parse().unwrap(), }); poll.votes.push(RankedChoiceVote { ranked_choices: vec![a, c.clone()], voter_ip: "127.0.0.3".parse().unwrap(), }); poll.votes.push(RankedChoiceVote { ranked_choices: vec![b.clone(), c.clone()], voter_ip: "127.0.0.3".parse().unwrap(), }); poll.votes.push(RankedChoiceVote { ranked_choices: vec![b, c.clone()], voter_ip: "127.0.0.3".parse().unwrap(), }); poll.finish().unwrap(); assert_eq!(poll.winners.unwrap()[0].candidate, c); } }
<reponame>ezw21/RecreationServicesMap<filename>client/jimu-ui/advanced/lib/rich-text-editor/editor/plugins/bubble.d.ts /// <reference types="react" /> /** @jsx jsx */ import { React } from 'jimu-core'; import { Sources } from '../../type'; import { PopperProps } from 'jimu-ui'; import { RichPluginInjectedProps } from './plugin'; interface _BubblePluginProps extends PopperProps { source?: Sources; } export declare type BubblePluginProps = _BubblePluginProps & RichPluginInjectedProps; export declare const _Bubble: (props: BubblePluginProps) => React.ReactElement; export declare const Bubble: React.ComponentType<any>; export {};
<reponame>HSadeghein/FlappyBird<gh_stars>1-10 #include "pch.h" #include "Helper/helper.h" #include "Application.h" namespace Immortal { Application::Application() { } Application::~Application() { Terminate(); } void Application::Run() { //Initialization Application::Initialize(); GLFWwindow* window = mp_Window->GetWindow(); UpdateEvent e(0.0f,0.0f); mp_Window->OnUpdate(e); } int Application::Initialize() { //Set error call back glfwSetErrorCallback(error_callback); if (!glfwInit()) { IMMORTAL_CORE_ERROR("GLFW is not initialized!!!"); return -1; } //Create Window mp_Window = std::make_unique<Window>(640, 480); if (!mp_Window->m_IsInitialized) { IMMORTAL_CORE_ERROR("GLFW Window couldn't be created!!"); return -1; } glfwMakeContextCurrent(mp_Window->GetWindow()); //load GLAD if (!gladLoadGLLoader((GLADloadproc)glfwGetProcAddress)) { IMMORTAL_CORE_ERROR("Failed to initialize GLAD"); return -1; } return 0; } void Application::Terminate() { mp_Window->Terminate(); glfwTerminate(); } }
<filename>src/main/java/com/thinkgem/jeesite/modules/hotel/entity/TbRoom.java /** * Copyright &copy; 2012-2016 <a href="https://github.com/thinkgem/jeesite">JeeSite</a> All rights reserved. */ package com.thinkgem.jeesite.modules.hotel.entity; import java.util.Date; import java.util.List; import java.util.Map; import org.hibernate.validator.constraints.Length; import com.thinkgem.jeesite.common.persistence.DataEntity; /** * 客房信息管理Entity * @author 王豹垒 * @version 2017-04-08 */ public class TbRoom extends DataEntity<TbRoom> { private static final long serialVersionUID = 1L; private String category; // 客房类型 private String status; // 客房状态 private String number; // 房间号 private String price; // 价格 private String state; //是否结账 private String facilitie; //客房设施 private String remark; // 备注 private String reserve; //预定状态 private TbCustomer customer; private Map<String, Object> paramMap; private List<Map<String, Object>> resultMapList; public TbRoom() { super(); } public TbRoom(String id){ super(id); } @Length(min=0, max=10, message="客房类型长度必须介于 0 和 10 之间") public String getCategory() { return category; } public void setCategory(String category) { this.category = category; } @Length(min=0, max=10, message="客房状态长度必须介于 0 和 10 之间") public String getStatus() { return status; } public void setStatus(String status) { this.status = status; } @Length(min=0, max=100, message="备注长度必须介于 0 和 100 之间") public String getRemark() { return remark; } public void setRemark(String remark) { this.remark = remark; } @Length(min=0, max=20, message="房间号长度必须介于 0 和 20 之间") public String getNumber() { return number; } public void setNumber(String number) { this.number = number; } public String getPrice() { return price; } public void setPrice(String price) { this.price = price; } public String getState() { return state; } public void setState(String state) { this.state = state; } public Map<String, Object> getParamMap() { return paramMap; } public void setParamMap(Map<String, Object> paramMap) { this.paramMap = paramMap; } public List<Map<String, Object>> getResultMapList() { return resultMapList; } public void setResultMapList(List<Map<String, Object>> resultMapList) { this.resultMapList = resultMapList; } public String getFacilitie() { return facilitie; } public void setFacilitie(String facilitie) { this.facilitie = facilitie; } public String getReserve() { return reserve; } public void setReserve(String reserve) { this.reserve = reserve; } public TbCustomer getCustomer() { return customer; } public void setCustomer(TbCustomer customer) { this.customer = customer; } }
import java.util.Comparator;
import java.util.Map;

/**
 * Comparator of a map containing QuadEdge as key
 * and Double as value (Double comparator).
 *
 * @author Eric Grosso
 *
 */
public class DoubleComparator implements Comparator<QuadEdge> {

	Map<QuadEdge, Double> map;

	/**
	 * Constructor.
	 *
	 * @param map
	 *            map containing QuadEdge and Double
	 */
	public DoubleComparator(Map<QuadEdge, Double> map) {
		this.map = map;
	}

	/**
	 * Method of comparison: orders quad edges by descending mapped value.
	 *
	 * @param qeA
	 *            quad edge to compare
	 * @param qeB
	 *            quad edge to compare
	 * @return a positive integer if the double value associated with qeA is
	 *         less than the value associated with qeB, 0 if the values are
	 *         equal, and a negative integer otherwise
	 */
	@Override
	public int compare(QuadEdge qeA, QuadEdge qeB) {
		// Double.compare works on the primitive values; the original == test
		// compared boxed Double references and could treat equal values as different.
		return Double.compare(this.map.get(qeB), this.map.get(qeA));
	}
}
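For context, this is how such a comparator is typically used: collect the quad edges from the map's keys and sort them so the edge with the largest associated value comes first. The helper below is a hypothetical sketch written for this note rather than part of the original source; it assumes QuadEdge and DoubleComparator are available on the classpath exactly as in the file above.

import java.util.ArrayList;
import java.util.List;
import java.util.Map;

// Hypothetical helper: orders quad edges by descending mapped value using DoubleComparator.
public final class QuadEdgeOrdering {

	private QuadEdgeOrdering() {
	}

	// Returns the quad edges of the map sorted so that the largest value comes first.
	public static List<QuadEdge> byDescendingValue(Map<QuadEdge, Double> values) {
		List<QuadEdge> edges = new ArrayList<>(values.keySet());
		edges.sort(new DoubleComparator(values));
		return edges;
	}
}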
// validateInput will validate all input before run task. func (t *ListStorageTask) validateInput() { if !t.ValidateService() { panic(fmt.Errorf("Task ListStorage value Service is invalid")) } if !t.ValidateStoragerFunc() { panic(fmt.Errorf("Task ListStorage value StoragerFunc is invalid")) } if !t.ValidateZone() { panic(fmt.Errorf("Task ListStorage value Zone is invalid")) } }
<reponame>AdityaDendukuri/IsingNucleation<filename>headers/simul.h #ifndef SIMUL_H #define SIMUL_H void run_sim_nucleation(int* LATTICE, int n, float J, float F, float beta); void ones(int* LATTICE, int n); void run_sim(int* LATTICE, int n, float J, float F, float beta); void validate_free_energy(int* LATTICE, int n, int J, int F); void make_nucleus(int* LATTICE, int n, float J, float F, float beta); #endif
/** Called when our internal view of the directory has changed. This can be * when the authorities change, networkstatuses change, the list of routerdescs * changes, or number of running routers changes. */ void router_dir_info_changed(void) { need_to_update_have_min_dir_info = 1; rend_hsdir_routers_changed(); }
/* * To change this license header, choose License Headers in Project Properties. * To change this template file, choose Tools | Templates * and open the template in the editor. */ package bookstore.ui; import bookstore.App; import bookstore.database.Database; import bookstore.database.tables.*; import java.awt.*; import java.awt.event.*; import javax.swing.*; public class Main extends javax.swing.JFrame { /** Creates new form Main */ public Main(String name, int role) { initComponents(); this.employeeNameLabel.setText(name); int position = role; String sPosition = ""; if (position == 1) sPosition = "Quản lí"; else if (position == 2) sPosition = "Kiểm kho"; else if (position == 0) sPosition = "Bán hàng"; this.employeePositionLabel.setText(sPosition); this.setLocationRelativeTo(null); } /** This method is called from within the constructor to * initialize the form. * WARNING: Do NOT modify this code. The content of this method is * always regenerated by the Form Editor. */ @SuppressWarnings("unchecked") // <editor-fold defaultstate="collapsed" desc="Generated Code">//GEN-BEGIN:initComponents private void initComponents() { settingButton = new metro.MetroButton(); mainPanel = new javax.swing.JPanel(); homePanel = new javax.swing.JPanel(); headhomePanel = new javax.swing.JPanel(); homeLabel = new javax.swing.JLabel(); bodyhomePanel = new javax.swing.JPanel(); employeeButton = new metro.MetroButton(); authorButton = new bookstore.ui.controls.MetroButton(); categoryButton = new bookstore.ui.controls.MetroButton(); publisherButton = new bookstore.ui.controls.MetroButton(); bookButton = new metro.MetroButton(); discountButton = new bookstore.ui.controls.MetroButton(); customerButton = new bookstore.ui.controls.MetroButton(); billButton = new bookstore.ui.controls.MetroButton(); moneyBillButton = new bookstore.ui.controls.MetroButton(); bookEntryButton = new bookstore.ui.controls.MetroButton(); introductionButton = new bookstore.ui.controls.MetroButton(); foothomePanel = new javax.swing.JPanel(); filler1 = new javax.swing.Box.Filler(new java.awt.Dimension(0, 0), new java.awt.Dimension(0, 0), new java.awt.Dimension(0, 32767)); employeePositionLabel = new javax.swing.JLabel(); filler3 = new javax.swing.Box.Filler(new java.awt.Dimension(0, 5), new java.awt.Dimension(0, 5), new java.awt.Dimension(32767, 5)); employeeNameLabel = new javax.swing.JLabel(); filler4 = new javax.swing.Box.Filler(new java.awt.Dimension(0, 10), new java.awt.Dimension(0, 10), new java.awt.Dimension(32767, 10)); logoutButton = new javax.swing.JButton(); filler2 = new javax.swing.Box.Filler(new java.awt.Dimension(0, 0), new java.awt.Dimension(0, 0), new java.awt.Dimension(0, 32767)); switchPanel = new javax.swing.JPanel(); employeePanel = new bookstore.ui.subpanel.EmployeePanel(); try { introductionPanel = new bookstore.ui.subpanel.IntroductionPanel(); } catch (bookstore.exceptions.DatabasePermissonErrorException e1) { e1.printStackTrace(); } authorPanel = new bookstore.ui.subpanel.AuthorPanel(); publisherPanel = new bookstore.ui.subpanel.PublisherPanel(); catogeryPanel = new bookstore.ui.subpanel.CatogeryPanel(); discountPanel = new bookstore.ui.subpanel.DiscountPanel(); bookPanel = new bookstore.ui.subpanel.BookPanel(); customerPanel = new bookstore.ui.subpanel.CustomerPanel(); moneyBillPanel = new bookstore.ui.subpanel.MoneyBillPanel(); billPanel = new bookstore.ui.subpanel.BillPanel(); bookEntryPanel = new bookstore.ui.subpanel.BookEntryPanel(); settingPanel1 = new bookstore.ui.subpanel.SettingPanel(); 
settingButton.setBackground(new java.awt.Color(40, 60, 92)); settingButton.setText("Cài đặt"); settingButton.setMaximumSize(new java.awt.Dimension(400, 100)); settingButton.setMinimumSize(new java.awt.Dimension(200, 20)); settingButton.setPreferredSize(new java.awt.Dimension(200, 40)); settingButton.addActionListener(new java.awt.event.ActionListener() { public void actionPerformed(java.awt.event.ActionEvent evt) { settingButtonActionPerformed(evt); } }); setDefaultCloseOperation(javax.swing.WindowConstants.EXIT_ON_CLOSE); getContentPane().setLayout(new javax.swing.BoxLayout(getContentPane(), javax.swing.BoxLayout.LINE_AXIS)); mainPanel.setMaximumSize(new java.awt.Dimension(32000, 3200)); mainPanel.setMinimumSize(new java.awt.Dimension(400, 300)); mainPanel.setPreferredSize(new java.awt.Dimension(800, 450)); mainPanel.setLayout(new javax.swing.BoxLayout(mainPanel, javax.swing.BoxLayout.LINE_AXIS)); homePanel.setBackground(new java.awt.Color(51, 51, 51)); homePanel.setMaximumSize(new java.awt.Dimension(32000, 32867)); homePanel.setMinimumSize(new java.awt.Dimension(100, 300)); homePanel.setPreferredSize(new java.awt.Dimension(200, 400)); homePanel.setLayout(new javax.swing.BoxLayout(homePanel, javax.swing.BoxLayout.PAGE_AXIS)); headhomePanel.setBackground(new java.awt.Color(51, 51, 51)); headhomePanel.setMaximumSize(new java.awt.Dimension(32000, 32000)); headhomePanel.setPreferredSize(new java.awt.Dimension(200, 60)); homeLabel.setBackground(new java.awt.Color(102, 102, 102)); homeLabel.setFont(new java.awt.Font("Segoe UI", 1, 25)); // NOI18N homeLabel.setForeground(new java.awt.Color(255, 255, 255)); homeLabel.setHorizontalAlignment(javax.swing.SwingConstants.CENTER); homeLabel.setIcon(new javax.swing.ImageIcon("D:\\UIT\\Lap trinh java\\bookstore\\imgs\\Untitled.png")); // NOI18N homeLabel.setMaximumSize(new java.awt.Dimension(32000, 32000)); homeLabel.setPreferredSize(new java.awt.Dimension(100, 50)); org.jdesktop.layout.GroupLayout headhomePanelLayout = new org.jdesktop.layout.GroupLayout(headhomePanel); headhomePanel.setLayout(headhomePanelLayout); headhomePanelLayout.setHorizontalGroup( headhomePanelLayout.createParallelGroup(org.jdesktop.layout.GroupLayout.LEADING) .add(homeLabel, org.jdesktop.layout.GroupLayout.DEFAULT_SIZE, 200, Short.MAX_VALUE) ); headhomePanelLayout.setVerticalGroup( headhomePanelLayout.createParallelGroup(org.jdesktop.layout.GroupLayout.LEADING) .add(homeLabel, org.jdesktop.layout.GroupLayout.DEFAULT_SIZE, 70, Short.MAX_VALUE) ); homePanel.add(headhomePanel); bodyhomePanel.setBackground(new java.awt.Color(51, 51, 51)); bodyhomePanel.setAlignmentX((float)0.5); bodyhomePanel.setMaximumSize(new java.awt.Dimension(32000, 32000)); bodyhomePanel.setPreferredSize(new java.awt.Dimension(200, 520)); bodyhomePanel.setLayout(new javax.swing.BoxLayout(bodyhomePanel, javax.swing.BoxLayout.PAGE_AXIS)); employeeButton.setBackground(new java.awt.Color(51, 51, 51)); employeeButton.setIcon(new javax.swing.ImageIcon("D:\\UIT\\Lap trinh java\\bookstore\\imgs\\customers.png")); // NOI18N employeeButton.setText("<NAME>"); employeeButton.setFont(new java.awt.Font("Sogoe Ui", 0, 17)); // NOI18N employeeButton.setHorizontalAlignment(javax.swing.SwingConstants.LEFT); employeeButton.setMaximumSize(new java.awt.Dimension(400, 100)); employeeButton.setMinimumSize(new java.awt.Dimension(200, 20)); employeeButton.setPreferredSize(new java.awt.Dimension(200, 40)); employeeButton.addActionListener(new java.awt.event.ActionListener() { public void actionPerformed(java.awt.event.ActionEvent evt) { 
employeeButtonActionPerformed(evt); } }); bodyhomePanel.add(employeeButton); authorButton.setBackground(new java.awt.Color(51, 51, 51)); authorButton.setIcon(new javax.swing.ImageIcon("D:\\UIT\\Lap trinh java\\bookstore\\imgs\\authors.png")); // NOI18N authorButton.setText("<NAME>"); authorButton.setDebugGraphicsOptions(javax.swing.DebugGraphics.NONE_OPTION); authorButton.setFont(new java.awt.Font("Sogoe Ui", 0, 17)); // NOI18N authorButton.setHorizontalAlignment(javax.swing.SwingConstants.LEFT); authorButton.setMaximumSize(new java.awt.Dimension(400, 100)); authorButton.setMinimumSize(new java.awt.Dimension(200, 20)); authorButton.setPreferredSize(new java.awt.Dimension(200, 40)); authorButton.addActionListener(new java.awt.event.ActionListener() { public void actionPerformed(java.awt.event.ActionEvent evt) { authorButtonActionPerformed(evt); } }); bodyhomePanel.add(authorButton); categoryButton.setBackground(new java.awt.Color(51, 51, 51)); categoryButton.setIcon(new javax.swing.ImageIcon("D:\\UIT\\Lap trinh java\\bookstore\\imgs\\categorys.png")); // NOI18N categoryButton.setText("Thể loại"); categoryButton.setFont(new java.awt.Font("Sogoe Ui", 0, 17)); // NOI18N categoryButton.setHorizontalAlignment(javax.swing.SwingConstants.LEFT); categoryButton.setMaximumSize(new java.awt.Dimension(400, 100)); categoryButton.setMinimumSize(new java.awt.Dimension(200, 20)); categoryButton.setPreferredSize(new java.awt.Dimension(200, 40)); categoryButton.addActionListener(new java.awt.event.ActionListener() { public void actionPerformed(java.awt.event.ActionEvent evt) { categoryButtonActionPerformed(evt); } }); bodyhomePanel.add(categoryButton); publisherButton.setBackground(new java.awt.Color(51, 51, 51)); publisherButton.setIcon(new javax.swing.ImageIcon("D:\\UIT\\Lap trinh java\\bookstore\\imgs\\publishers.png")); // NOI18N publisherButton.setText("Nhà xuất bản"); publisherButton.setFont(new java.awt.Font("Sogoe Ui", 0, 17)); // NOI18N publisherButton.setHorizontalAlignment(javax.swing.SwingConstants.LEFT); publisherButton.setMaximumSize(new java.awt.Dimension(400, 100)); publisherButton.setMinimumSize(new java.awt.Dimension(200, 20)); publisherButton.setPreferredSize(new java.awt.Dimension(210, 40)); publisherButton.addActionListener(new java.awt.event.ActionListener() { public void actionPerformed(java.awt.event.ActionEvent evt) { publisherButtonActionPerformed(evt); } }); bodyhomePanel.add(publisherButton); bookButton.setBackground(new java.awt.Color(51, 51, 51)); bookButton.setIcon(new javax.swing.ImageIcon("D:\\UIT\\Lap trinh java\\bookstore\\imgs\\booksicon.png")); // NOI18N bookButton.setText("Sách"); bookButton.setFont(new java.awt.Font("Sogoe Ui", 0, 17)); // NOI18N bookButton.setHorizontalAlignment(javax.swing.SwingConstants.LEFT); bookButton.setMaximumSize(new java.awt.Dimension(400, 100)); bookButton.setMinimumSize(new java.awt.Dimension(200, 20)); bookButton.setPreferredSize(new java.awt.Dimension(200, 40)); bookButton.addActionListener(new java.awt.event.ActionListener() { public void actionPerformed(java.awt.event.ActionEvent evt) { bookButtonActionPerformed(evt); } }); bodyhomePanel.add(bookButton); discountButton.setBackground(new java.awt.Color(51, 51, 51)); discountButton.setIcon(new javax.swing.ImageIcon("D:\\UIT\\Lap trinh java\\bookstore\\imgs\\sales.png")); // NOI18N discountButton.setText("Khuyến mãi"); discountButton.setFont(new java.awt.Font("Sogoe Ui", 0, 17)); // NOI18N discountButton.setHorizontalAlignment(javax.swing.SwingConstants.LEFT); 
discountButton.setMaximumSize(new java.awt.Dimension(400, 100)); discountButton.setMinimumSize(new java.awt.Dimension(200, 20)); discountButton.setPreferredSize(new java.awt.Dimension(200, 40)); discountButton.addActionListener(new java.awt.event.ActionListener() { public void actionPerformed(java.awt.event.ActionEvent evt) { discountButtonActionPerformed(evt); } }); bodyhomePanel.add(discountButton); customerButton.setBackground(new java.awt.Color(51, 51, 51)); customerButton.setIcon(new javax.swing.ImageIcon("D:\\UIT\\Lap trinh java\\bookstore\\imgs\\employee.png")); // NOI18N customerButton.setText("<NAME>"); customerButton.setFont(new java.awt.Font("Sogoe Ui", 0, 17)); // NOI18N customerButton.setHorizontalAlignment(javax.swing.SwingConstants.LEFT); customerButton.setMaximumSize(new java.awt.Dimension(400, 100)); customerButton.setMinimumSize(new java.awt.Dimension(200, 20)); customerButton.setPreferredSize(new java.awt.Dimension(200, 40)); customerButton.addActionListener(new java.awt.event.ActionListener() { public void actionPerformed(java.awt.event.ActionEvent evt) { customerButtonActionPerformed(evt); } }); bodyhomePanel.add(customerButton); billButton.setBackground(new java.awt.Color(51, 51, 51)); billButton.setIcon(new javax.swing.ImageIcon("D:\\UIT\\Lap trinh java\\bookstore\\imgs\\bill.png")); // NOI18N billButton.setText("Hóa đơn"); billButton.setFont(new java.awt.Font("Sogoe Ui", 0, 17)); // NOI18N billButton.setHorizontalAlignment(javax.swing.SwingConstants.LEFT); billButton.setMaximumSize(new java.awt.Dimension(400, 100)); billButton.setMinimumSize(new java.awt.Dimension(200, 20)); billButton.setPreferredSize(new java.awt.Dimension(200, 40)); billButton.addActionListener(new java.awt.event.ActionListener() { public void actionPerformed(java.awt.event.ActionEvent evt) { billButtonActionPerformed(evt); } }); bodyhomePanel.add(billButton); moneyBillButton.setBackground(new java.awt.Color(51, 51, 51)); moneyBillButton.setIcon(new javax.swing.ImageIcon("D:\\UIT\\Lap trinh java\\bookstore\\imgs\\billmoney.png")); // NOI18N moneyBillButton.setText("Phiếu thu tiền"); moneyBillButton.setFont(new java.awt.Font("Sogoe Ui", 0, 17)); // NOI18N moneyBillButton.setHorizontalAlignment(javax.swing.SwingConstants.LEFT); moneyBillButton.setMaximumSize(new java.awt.Dimension(400, 100)); moneyBillButton.setMinimumSize(new java.awt.Dimension(200, 20)); moneyBillButton.setPreferredSize(new java.awt.Dimension(200, 40)); moneyBillButton.addActionListener(new java.awt.event.ActionListener() { public void actionPerformed(java.awt.event.ActionEvent evt) { moneyBillButtonActionPerformed(evt); } }); bodyhomePanel.add(moneyBillButton); bookEntryButton.setBackground(new java.awt.Color(51, 51, 51)); bookEntryButton.setIcon(new javax.swing.ImageIcon("D:\\UIT\\Lap trinh java\\bookstore\\imgs\\bookEntry.png")); // NOI18N bookEntryButton.setText("Phiếu nhập sách"); bookEntryButton.setFont(new java.awt.Font("Sogoe Ui", 0, 17)); // NOI18N bookEntryButton.setHorizontalAlignment(javax.swing.SwingConstants.LEFT); bookEntryButton.setMaximumSize(new java.awt.Dimension(400, 100)); bookEntryButton.setMinimumSize(new java.awt.Dimension(200, 20)); bookEntryButton.setPreferredSize(new java.awt.Dimension(200, 40)); bookEntryButton.addActionListener(new java.awt.event.ActionListener() { public void actionPerformed(java.awt.event.ActionEvent evt) { bookEntryButtonActionPerformed(evt); } }); bodyhomePanel.add(bookEntryButton); introductionButton.setBackground(new java.awt.Color(51, 51, 51)); introductionButton.setIcon(new 
javax.swing.ImageIcon("D:\\UIT\\Lap trinh java\\bookstore\\imgs\\statistics.png")); // NOI18N introductionButton.setText("Thống kê"); introductionButton.setFont(new java.awt.Font("Sogoe Ui", 0, 17)); // NOI18N introductionButton.setHorizontalAlignment(javax.swing.SwingConstants.LEFT); introductionButton.setMaximumSize(new java.awt.Dimension(400, 100)); introductionButton.setMinimumSize(new java.awt.Dimension(200, 20)); introductionButton.setPreferredSize(new java.awt.Dimension(200, 40)); introductionButton.addActionListener(new java.awt.event.ActionListener() { public void actionPerformed(java.awt.event.ActionEvent evt) { introductionButtonActionPerformed(evt); } }); bodyhomePanel.add(introductionButton); homePanel.add(bodyhomePanel); foothomePanel.setBackground(new java.awt.Color(51, 51, 51)); foothomePanel.setMaximumSize(new java.awt.Dimension(32000, 32000)); foothomePanel.setPreferredSize(new java.awt.Dimension(100, 90)); foothomePanel.setLayout(new javax.swing.BoxLayout(foothomePanel, javax.swing.BoxLayout.PAGE_AXIS)); foothomePanel.add(filler1); employeePositionLabel.setFont(new java.awt.Font("Verdana", 0, 17)); // NOI18N employeePositionLabel.setForeground(new java.awt.Color(204, 204, 255)); employeePositionLabel.setHorizontalAlignment(javax.swing.SwingConstants.CENTER); employeePositionLabel.setText("chức"); employeePositionLabel.setAlignmentX(0.5F); employeePositionLabel.setFocusable(false); employeePositionLabel.setHorizontalTextPosition(javax.swing.SwingConstants.CENTER); employeePositionLabel.setMaximumSize(new java.awt.Dimension(32000, 20)); employeePositionLabel.setMinimumSize(new java.awt.Dimension(10, 10)); employeePositionLabel.setPreferredSize(new java.awt.Dimension(100, 55)); foothomePanel.add(employeePositionLabel); foothomePanel.add(filler3); employeeNameLabel.setFont(new java.awt.Font("Verdana", 0, 17)); // NOI18N employeeNameLabel.setForeground(new java.awt.Color(204, 204, 255)); employeeNameLabel.setHorizontalAlignment(javax.swing.SwingConstants.CENTER); employeeNameLabel.setText("tên"); employeeNameLabel.setAlignmentX(0.5F); employeeNameLabel.setMaximumSize(new java.awt.Dimension(32000, 20)); employeeNameLabel.setMinimumSize(new java.awt.Dimension(10, 10)); employeeNameLabel.setPreferredSize(new java.awt.Dimension(100, 55)); foothomePanel.add(employeeNameLabel); foothomePanel.add(filler4); logoutButton.setBackground(new java.awt.Color(51, 51, 51)); logoutButton.setFont(new java.awt.Font("Sitka Display", 0, 20)); // NOI18N logoutButton.setForeground(new java.awt.Color(204, 204, 255)); logoutButton.setText("Đăng xuất"); logoutButton.setAlignmentX(0.5F); logoutButton.setFocusPainted(false); logoutButton.setMaximumSize(new java.awt.Dimension(120, 30)); logoutButton.setMinimumSize(new java.awt.Dimension(10, 10)); logoutButton.setPreferredSize(new java.awt.Dimension(97, 55)); logoutButton.addActionListener(new java.awt.event.ActionListener() { public void actionPerformed(java.awt.event.ActionEvent evt) { logoutButtonActionPerformed(evt); } }); foothomePanel.add(logoutButton); foothomePanel.add(filler2); homePanel.add(foothomePanel); mainPanel.add(homePanel); switchPanel.setMinimumSize(new java.awt.Dimension(400, 300)); switchPanel.setPreferredSize(new java.awt.Dimension(550, 400)); switchPanel.setLayout(new java.awt.CardLayout()); switchPanel.add(employeePanel, "employee"); introductionPanel.setMaximumSize(new java.awt.Dimension(32000, 32000)); introductionPanel.setMinimumSize(new java.awt.Dimension(500, 125)); introductionPanel.setPreferredSize(new 
java.awt.Dimension(560, 125)); switchPanel.add(introductionPanel, "introduction"); switchPanel.add(authorPanel, "author"); switchPanel.add(publisherPanel, "publisher"); switchPanel.add(catogeryPanel, "catogery"); switchPanel.add(discountPanel, "discount"); switchPanel.add(bookPanel, "book"); switchPanel.add(customerPanel, "customer"); switchPanel.add(moneyBillPanel, "moneyBill"); switchPanel.add(billPanel, "bill"); switchPanel.add(bookEntryPanel, "bookEntry"); switchPanel.add(settingPanel1, "setting"); mainPanel.add(switchPanel); getContentPane().add(mainPanel); pack(); }// </editor-fold>//GEN-END:initComponents private void introductionButtonActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_introductionButtonActionPerformed CardLayout card = (CardLayout)this.switchPanel.getLayout(); this.introductionPanel.update(); card.show(this.switchPanel, "introduction"); }//GEN-LAST:event_introductionButtonActionPerformed private void authorButtonActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_authorButtonActionPerformed CardLayout card = (CardLayout)this.switchPanel.getLayout(); this.authorPanel.reloadTable(); card.show(this.switchPanel, "author"); }//GEN-LAST:event_authorButtonActionPerformed private void publisherButtonActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_publisherButtonActionPerformed CardLayout card = (CardLayout)this.switchPanel.getLayout(); this.publisherPanel.reloadTable(); card.show(this.switchPanel, "publisher"); }//GEN-LAST:event_publisherButtonActionPerformed private void categoryButtonActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_categoryButtonActionPerformed // TODO add your handling code here: CardLayout card = (CardLayout)this.switchPanel.getLayout(); this.catogeryPanel.reloadTable(); card.show(this.switchPanel, "catogery"); }//GEN-LAST:event_categoryButtonActionPerformed private void employeeButtonActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_employeeButtonActionPerformed // TODO add your handling code here: CardLayout card = (CardLayout)this.switchPanel.getLayout(); this.employeePanel.reloadTable(); card.show(this.switchPanel, "employee"); }//GEN-LAST:event_employeeButtonActionPerformed private void discountButtonActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_discountButtonActionPerformed // TODO add your handling code here: CardLayout card = (CardLayout)this.switchPanel.getLayout(); this.discountPanel.reloadTable(); card.show(this.switchPanel, "discount"); }//GEN-LAST:event_discountButtonActionPerformed private void customerButtonActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_customerButtonActionPerformed // TODO add your handling code here: CardLayout card = (CardLayout)this.switchPanel.getLayout(); this.customerPanel.reloadTable(); card.show(this.switchPanel, "customer"); }//GEN-LAST:event_customerButtonActionPerformed private void moneyBillButtonActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_moneyBillButtonActionPerformed // TODO add your handling code here: CardLayout card = (CardLayout)this.switchPanel.getLayout(); this.moneyBillPanel.reloadTable(); card.show(this.switchPanel, "moneyBill"); }//GEN-LAST:event_moneyBillButtonActionPerformed private void billButtonActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_billButtonActionPerformed CardLayout card = (CardLayout)this.switchPanel.getLayout(); this.billPanel.reloadTable(); card.show(this.switchPanel, "bill"); 
}//GEN-LAST:event_billButtonActionPerformed private void bookEntryButtonActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_bookEntryButtonActionPerformed CardLayout card = (CardLayout)this.switchPanel.getLayout(); //this.BookEntryPanel.reloadTable(); card.show(this.switchPanel, "bookEntry"); }//GEN-LAST:event_bookEntryButtonActionPerformed private void logoutButtonActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_logoutButtonActionPerformed App.setAccount(null); Database.initialize("orcl123", Account.ADMIN_USERNAME, Account.ADMIN_PASSWORD); Database.connect(); this.dispose(); Entry entry = new Entry(); entry.show(); }//GEN-LAST:event_logoutButtonActionPerformed private void settingButtonActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_settingButtonActionPerformed CardLayout card = (CardLayout)this.switchPanel.getLayout(); card.show(this.switchPanel, "setting"); }//GEN-LAST:event_settingButtonActionPerformed private void bookButtonActionPerformed(java.awt.event.ActionEvent evt) {//GEN-FIRST:event_bookButtonActionPerformed // TODO add your handling code here: CardLayout card = (CardLayout)this.switchPanel.getLayout(); this.bookPanel.reloadTable(); card.show(this.switchPanel, "book"); }//GEN-LAST:event_bookButtonActionPerformed // Variables declaration - do not modify//GEN-BEGIN:variables private bookstore.ui.controls.MetroButton authorButton; private bookstore.ui.subpanel.AuthorPanel authorPanel; private bookstore.ui.controls.MetroButton billButton; private bookstore.ui.subpanel.BillPanel billPanel; private javax.swing.JPanel bodyhomePanel; private metro.MetroButton bookButton; private bookstore.ui.controls.MetroButton bookEntryButton; private bookstore.ui.subpanel.BookEntryPanel bookEntryPanel; private bookstore.ui.subpanel.BookPanel bookPanel; private bookstore.ui.controls.MetroButton categoryButton; private bookstore.ui.subpanel.CatogeryPanel catogeryPanel; private bookstore.ui.controls.MetroButton customerButton; private bookstore.ui.subpanel.CustomerPanel customerPanel; private bookstore.ui.controls.MetroButton discountButton; private bookstore.ui.subpanel.DiscountPanel discountPanel; private metro.MetroButton employeeButton; private javax.swing.JLabel employeeNameLabel; private bookstore.ui.subpanel.EmployeePanel employeePanel; private javax.swing.JLabel employeePositionLabel; private javax.swing.Box.Filler filler1; private javax.swing.Box.Filler filler2; private javax.swing.Box.Filler filler3; private javax.swing.Box.Filler filler4; private javax.swing.JPanel foothomePanel; private javax.swing.JPanel headhomePanel; private javax.swing.JLabel homeLabel; private javax.swing.JPanel homePanel; private bookstore.ui.controls.MetroButton introductionButton; private bookstore.ui.subpanel.IntroductionPanel introductionPanel; private javax.swing.JButton logoutButton; private javax.swing.JPanel mainPanel; private bookstore.ui.controls.MetroButton moneyBillButton; private bookstore.ui.subpanel.MoneyBillPanel moneyBillPanel; private bookstore.ui.controls.MetroButton publisherButton; private bookstore.ui.subpanel.PublisherPanel publisherPanel; private metro.MetroButton settingButton; private bookstore.ui.subpanel.SettingPanel settingPanel1; private javax.swing.JPanel switchPanel; // End of variables declaration//GEN-END:variables }
export type Adapters = 'angular' | 'node' | 'react' | 'svelte' | 'vue' export type OutputFormats = 'TypeScript' | 'JavaScript' export type GeneratorConfig = { $schema?: string baseLocale?: string tempPath?: string outputPath?: string outputFormat?: OutputFormats typesFileName?: string utilFileName?: string formattersTemplateFileName?: string typesTemplateFileName?: string esmImports?: boolean adapter?: Adapters adapterFileName?: string loadLocalesAsync?: boolean generateOnlyTypes?: boolean banner?: string } export type RollupConfig = { locales?: string[] } export type GeneratorConfigWithDefaultValues = GeneratorConfig & { baseLocale: string tempPath: string outputPath: string outputFormat: OutputFormats typesFileName: string utilFileName: string formattersTemplateFileName: string typesTemplateFileName: string esmImports: boolean loadLocalesAsync: boolean generateOnlyTypes: boolean banner: string }
<reponame>zurutech/ashpy<gh_stars>10-100 # Copyright 2020 Zuru Tech HK Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tests for :mod:`ashpy.trainers`.""" from pathlib import Path from typing import List import ashpy import pytest import tensorflow as tf from ashpy.trainers import AdversarialTrainer, ClassifierTrainer from tests.test_restorers import ModelNotConstructedError, _check_models_weights from tests.utils.fake_training_loop import ( FakeAdversarialTraining, FakeClassifierTraining, FakeTraining, ) def test_correct_trainer_restoration_on_restart(fake_training_fn, tmpdir): logdir = Path(tmpdir) fake_training: FakeTraining = fake_training_fn(logdir=logdir) assert fake_training() if isinstance(fake_training, FakeClassifierTraining): assert isinstance(fake_training.trainer, ClassifierTrainer) trained_model = fake_training.trainer._model new_training: FakeClassifierTraining = fake_training_fn(logdir=logdir) new_training.trainer._build_and_restore_models(new_training.dataset) restored_model = new_training.trainer._model _check_models_weights(trained_model, restored_model) if isinstance(fake_training, FakeAdversarialTraining): assert isinstance(fake_training.trainer, AdversarialTrainer) trained_g = fake_training.trainer._generator trained_d = fake_training.trainer._discriminator new_training: FakeAdversarialTraining = fake_training_fn(logdir=logdir) new_training.trainer._build_and_restore_models(new_training.dataset) restored_g = new_training.trainer._generator restored_d = new_training.trainer._discriminator _check_models_weights(trained_g, restored_g) _check_models_weights(trained_d, restored_d) def test_generate_human_ckpt_dict(fake_training_fn, tmpdir): """ Test that the generation of the human readable map of the ckpt_dict works. TODO: improve the test. """ logdir = Path(tmpdir) fake_training = fake_training_fn(logdir=logdir) assert fake_training() trainer = fake_training.trainer assert trainer._checkpoint_map assert (Path(trainer._ckpts_dir) / "checkpoint_map.json").exists() metrics: List[ashpy.metrics.Metric] = trainer._metrics for metric in metrics: assert metric.best_folder / "ckpts" / "checkpoint_map.json"
Continuing value of the VDRL test and biological false reactions

schedule: an explanatory comment added to the issued report should overcome any such misunderstanding.

Yours faithfully,
H Young*
D H H Robertson*
J M Hunter†

*Departments of Bacteriology and Genitourinary Medicine, Edinburgh University, Edinburgh EH8 9AG
†Department of Genitourinary Medicine, Glasgow Royal Infirmary, Glasgow G3 1

References
1. Young H, Henrichsen C, Robertson DHH. Treponema pallidum haemagglutination test as a screening procedure for the diagnosis of syphilis. Br J Vener Dis 1974;50:341-6.
2. Hunter JM. The effect of treatment on the Treponema pallidum haemagglutination test in early syphilis. Scott Med J 1979;24:307-12.
3. Robertson DHH, McMillan A, Young H,

Role of the VDRL test in the detection of syphilis

Sir,

In the paper by P Diggory (Br J Vener Dis 1983;59:8-10) it was stated that, "the TPHA test results on sera from patients treated during the primary and early secondary stages of the disease usually become negative within one or two years." Work carried out in Edinburgh1 was misquoted in support of this statement. Our data showed that the TPHA test results did not become negative in any of the cases of treated syphilis. A more detailed study2 confirmed these earlier findings: in none of 55 cases of early syphilis in which the TPHA result was positive before treatment did the test become negative after treatment.

We also have reservations regarding the general proposal that the VDRL test should be withdrawn from initial testing of syphilis
// Bytes serializes an SVB. func (s SVB) Bytes() []byte { u := make([]uint16, s.Size()+2) u[0] = s.MainAddress u[1] = 0xffff for i, c := range s.Constants { u[i+2] = c.Value } i := uint16(len(s.Constants)) + 2 for _, sub := range s.Subroutines { for _, op := range sub.Instructions { code := (op.Opcode << 8) packed := dat.OpNameToPacked[op.Name] switch packed { case 0: u[i] = code case 1: u[i] = code | op.Operands[0] case 2: u[i] = code | (op.Operands[0] << 4) | op.Operands[1] } i++ for _, and := range op.Operands[packed:] { u[i] = and i++ } } } buf := new(bytes.Buffer) binary.Write(buf, binary.BigEndian, u) return buf.Bytes() }
Secure

The main focus of this framework is to be secure. Banshee is protected against common attacks like SQL injection, Cross-Site Scripting, Cross-Site Request Forgery and session hijacking. The framework takes care of authenticating users in a transparent and secure way. While many other frameworks claim to be secure, Banshee is one of the few consisting of provably secure code, even after you change the code.

Fast

Although Banshee offers a lot of features and has everything a modern framework needs, it is very fast. Its lean MVC design and use of XSLT for the view result in very fast page generation. While it has an internal caching system for improved speed, it is still fast without it. Banshee has built-in support for Hiawatha's cache system to offer the same speed as a static website.
/* Daily Recipe Table */ static abstract class DailyRecipeTable { static final String TABLE_NAME = "daily_recipe"; static final TableColumn COLUMN_ID = new TableColumn(DataType.INTEGER, "daily_recipe_id"); static final TableColumn COLUMN_YEAR = new TableColumn(DataType.INTEGER, "year"); static final TableColumn COLUMN_MONTH = new TableColumn(DataType.INTEGER, "month"); static final TableColumn COLUMN_DAY = new TableColumn(DataType.INTEGER, "date"); static final TableColumn COLUMN_RECIPE_FK = new TableColumn(DataType.INTEGER, "fk_recipe"); }
// ClearedEdges returns all edge names that were cleared in this mutation. func (m *DiscordBotMutation) ClearedEdges() []string { edges := make([]string, 0, 3) if m.clearedaccount { edges = append(edges, discordbot.EdgeAccount) } if m.clearedproject { edges = append(edges, discordbot.EdgeProject) } if m.clearedrepository { edges = append(edges, discordbot.EdgeRepository) } return edges }
All other candidates seen as unacceptable by half or more of Republicans PRINCETON, NJ -- Newt Gingrich (62%) and Mitt Romney (54%) are the only two candidates Republicans say would be acceptable presidential nominees from their party, emphasizing the degree to which the GOP race has narrowed down to these two men at this juncture. A majority of Republicans say each of the other six candidates measured would not be acceptable nominees. Republicans and Republican-leaning independents in Gallup's Nov. 28-Dec. 1 survey were asked to rate the acceptability of eight active GOP candidates. The "acceptable" responses range from Gingrich's 62% to 27% for Rick Santorum. This is the first time Gallup has asked this question in reference to the 2012 election. More than half of Republicans nationwide now see Rick Perry and Herman Cain -- both of whom previously led or tied for the lead in Gallup's measure of positive intensity and in Gallup's trial-heat ballots -- as unacceptable nominees. These data were collected prior to Cain's Saturday announcement that he was suspending his campaign for the GOP nomination. Majority of Tea Party Supporters Find Both Gingrich and Romney Acceptable Tea Party supporters -- about 42% of Republicans in this sample -- are at least slightly more likely to find six of the eight candidates acceptable compared with those who are not Tea Party supporters. Tea Party supporters are less positive than nonsupporters about Ron Paul and Jon Huntsman. Eighty-two percent of Tea Party supporters would find Gingrich acceptable as a nominee, making him by far their top choice on this measure. Importantly, Tea Party supporters are also more positive about Romney than are nonsupporters, putting him in second place behind Gingrich, with a 58% acceptable score. Michele Bachmann (52%) is the third candidate whom a majority of Tea Party supporters would find acceptable. Conservative Republicans, about 70% of Republicans in this sample, don't differ much from all Republicans in their views of the candidates' acceptability. Moderate and liberal Republicans, however, are substantially less likely to say that Gingrich is an acceptable nominee than are conservatives, yielding a situation in which about equal percentages of moderate and liberal Republicans find Romney (51%) and Gingrich (48%) acceptable. Moderate and liberal Republicans are more likely than conservatives to say Paul and Huntsman would be acceptable nominees -- but in both instances, support for the candidates is still well below the majority level. Older Republicans Positive About Gingrich Republicans 55 and older are more likely than younger Republicans to say Gingrich and Romney would be acceptable nominees. Among the older group, 73% say Gingrich would be acceptable, and 62% say Romney would be acceptable. Older Republicans are slightly more positive than younger Republicans about Bachmann, but are less positive than the younger group about Cain, Paul, and Huntsman. This echoes the finding from Gallup's recent nomination preference question, which showed Gingrich demonstrating great appeal to older Republicans. Implications As the race for the GOP nomination enters the final month before actual voting begins, Republicans nationwide appear to have narrowed their field of acceptable candidates down to two -- former Speaker Gingrich and former Massachusetts Gov. Romney. 
More Republicans see Gingrich than see Romney as an acceptable nominee, particularly among Tea Party supporters and older Republicans -- although a majority of both of these groups say Romney is also acceptable.
def lat_r(self): self.LOGGER("Moving laterally right")
#include <bits/stdc++.h>
#define ll long long
#define F first
#define S second
#define SI size()
#define pb push_back
#define R return
using namespace std;

ll i, m, n, sum;
pair<ll, ll> p[107];
ll a[1007][1007];

// Flood-fill from one point: any unvisited point (value 1) that shares the same
// row or the same column belongs to the same component and is marked visited (value 2).
void dfs(pair<ll, ll> nod)
{
    for (ll i = 1; i <= 1000; i++)
    {
        if (a[i][nod.second] == 1)
        {
            a[i][nod.second] = 2;
            dfs({i, nod.second});
        }
        if (a[nod.first][i] == 1)
        {
            a[nod.first][i] = 2;
            dfs({nod.first, i});
        }
    }
}

int main()
{
    cin >> n;
    for (ll i = 0; i < n; i++)
    {
        cin >> p[i].first >> p[i].second;
        a[p[i].first][p[i].second] = 1;
    }
    // Count the connected components of points linked by shared rows/columns;
    // joining them all together needs (components - 1) links.
    for (ll i = 0; i < n; i++)
    {
        if (a[p[i].first][p[i].second] != 2)
        {
            dfs(p[i]);
            sum++;
        }
    }
    cout << sum - 1;
}
<gh_stars>100-1000 // Copyright <NAME> 2015 - 2016. // Distributed under the Boost Software License, Version 1.0. // (See accompanying file ../LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt) /// \file /// \brief definition of `sg14::fixed_point` type #if !defined(SG14_FIXED_POINT_DEF_H) #define SG14_FIXED_POINT_DEF_H 1 #include <sg14/auxiliary/const_integer.h> #include <sg14/bits/number_base.h> /// study group 14 of the C++ working group namespace sg14 { // forward declaration template<class Rep = int, int Exponent = 0> class fixed_point; //////////////////////////////////////////////////////////////////////////////// // implementation-specific definitions namespace _impl { namespace fp { //////////////////////////////////////////////////////////////////////////////// // sg14::_impl::float_of_size template<int NumBits, class Enable = void> struct float_of_size; template<int NumBits> struct float_of_size<NumBits, enable_if_t<NumBits <= sizeof(float)*CHAR_BIT>> { using type = float; }; template<int NumBits> struct float_of_size<NumBits, enable_if_t<sizeof(float)*CHAR_BIT < NumBits && NumBits <= sizeof(double)*CHAR_BIT>> { using type = double; }; template<int NumBits> struct float_of_size<NumBits, enable_if_t<sizeof(double)*CHAR_BIT < NumBits && NumBits <= sizeof(long double)*CHAR_BIT>> { using type = long double; }; //////////////////////////////////////////////////////////////////////////////// // sg14::_impl::float_of_same_size template<class T> using float_of_same_size = typename float_of_size<digits<T>::value + is_signed<T>::value>::type; } } /// \brief literal real number approximation that uses fixed-point arithmetic /// \headerfile sg14/fixed_point /// /// \tparam Rep the underlying type used to represent the value /// \tparam Exponent the value by which to scale the integer value in order to get the real value /// /// \par Examples /// /// To define a fixed-point value 1 byte in size with a sign bit, 3 integer bits and 4 fractional bits: /// \snippet snippets.cpp define a fixed_point value template<class Rep, int Exponent> class fixed_point : public _impl::number_base<fixed_point<Rep, Exponent>, Rep> { using _base = _impl::number_base<fixed_point<Rep, Exponent>, Rep>; public: //////////////////////////////////////////////////////////////////////////////// // types /// alias to template parameter, \a Rep using rep = Rep; //////////////////////////////////////////////////////////////////////////////// // constants /// value of template parameter, \a Exponent constexpr static int exponent = Exponent; /// number of binary digits this type can represent; /// equivalent to [std::numeric_limits::digits](http://en.cppreference.com/w/cpp/types/numeric_limits/digits) constexpr static int digits = std::numeric_limits<Rep>::digits; /// number of binary digits devoted to integer part of value; /// can be negative for specializations with especially small ranges constexpr static int integer_digits = digits+exponent; /// number of binary digits devoted to fractional part of value; /// can be negative for specializations with especially large ranges constexpr static int fractional_digits = -exponent; //////////////////////////////////////////////////////////////////////////////// // functions private: // constructor taking representation explicitly using operator++(int)-style trick constexpr fixed_point(rep r, int) :_base(r) { } public: /// default constructor constexpr fixed_point() : _base() { } /// constructor taking a fixed-point type template<class FromRep, int FromExponent> 
constexpr fixed_point(const fixed_point<FromRep, FromExponent>& rhs) : _base(fixed_point_to_rep(rhs)) { } /// constructor taking an integral_constant type template<class Integral, Integral Constant> constexpr fixed_point(const std::integral_constant<Integral, Constant>&) : fixed_point(fixed_point<Integral, 0>::from_data(Constant)) { } /// constructor taking an integer type template<class S, _impl::enable_if_t<std::numeric_limits<S>::is_integer, int> Dummy = 0> constexpr fixed_point(const S& s) : fixed_point(fixed_point<S, 0>::from_data(s)) { } /// constructor taking an integral_constant type template<class Integral, Integral Value, int Digits> constexpr fixed_point(const_integer<Integral, Value, Digits, Exponent> ci) : _base(ci << Exponent) { } /// constructor taking a floating-point type template<class S, _impl::enable_if_t<std::numeric_limits<S>::is_iec559, int> Dummy = 0> constexpr fixed_point(S s) :_base(floating_point_to_rep(s)) { } /// copy assignment operator taking an integer type template<class S, _impl::enable_if_t<std::numeric_limits<S>::is_integer, int> Dummy = 0> fixed_point& operator=(S s) { return operator=(fixed_point<S, 0>::from_data(s)); } /// copy assignment operator taking a floating-point type template<class S, _impl::enable_if_t<std::numeric_limits<S>::is_iec559, int> Dummy = 0> fixed_point& operator=(S s) { _base::operator=(floating_point_to_rep(s)); return *this; } /// copy assignement operator taking a fixed-point type template<class FromRep, int FromExponent> fixed_point& operator=(const fixed_point<FromRep, FromExponent>& rhs) { _base::operator=(fixed_point_to_rep(rhs)); return *this; } /// returns value represented as bool template<typename R = Rep> constexpr operator typename std::enable_if<std::is_convertible<Rep, bool>::value, bool>() const { return static_cast<bool>(_base::data()); } /// returns value represented as integral template<class S, _impl::enable_if_t<std::numeric_limits<S>::is_integer, int> Dummy = 0> constexpr operator S() const { return rep_to_integral<S>(_base::data()); } /// returns value represented as floating-point template<class S, _impl::enable_if_t<std::numeric_limits<S>::is_iec559, int> Dummy = 0> explicit constexpr operator S() const { return rep_to_floating_point<S>(_base::data()); } /// creates an instance given the underlying representation value static constexpr fixed_point from_data(rep const & r) { return fixed_point(r, 0); } private: template<class S, _impl::enable_if_t<std::numeric_limits<S>::is_iec559, int> Dummy = 0> static constexpr S one(); template<class S, _impl::enable_if_t<std::numeric_limits<S>::is_integer, int> Dummy = 0> static constexpr S one(); template<class S> static constexpr S inverse_one(); template<class S> static constexpr S rep_to_integral(rep r); template<class S> static constexpr rep floating_point_to_rep(S s); template<class S> static constexpr S rep_to_floating_point(rep r); template<class FromRep, int FromExponent> static constexpr rep fixed_point_to_rep(const fixed_point<FromRep, FromExponent>& rhs); }; /// value of template parameter, \a Exponent template<class Rep, int Exponent> constexpr int fixed_point<Rep, Exponent>::exponent; /// number of binary digits this type can represent; /// equivalent to [std::numeric_limits::digits](http://en.cppreference.com/w/cpp/types/numeric_limits/digits) template<class Rep, int Exponent> constexpr int fixed_point<Rep, Exponent>::digits; /// number of binary digits devoted to integer part of value; /// can be negative for specializations with especially small 
ranges template<class Rep, int Exponent> constexpr int fixed_point<Rep, Exponent>::integer_digits; /// number of binary digits devoted to fractional part of value; /// can be negative for specializations with especially large ranges template<class Rep, int Exponent> constexpr int fixed_point<Rep, Exponent>::fractional_digits; //////////////////////////////////////////////////////////////////////////////// // general-purpose implementation-specific definitions namespace _impl { //////////////////////////////////////////////////////////////////////////////// // sg14::_impl::is_fixed_point template<class T> struct is_fixed_point : public std::false_type { }; template<class Rep, int Exponent> struct is_fixed_point<fixed_point<Rep, Exponent>> : public std::true_type { }; //////////////////////////////////////////////////////////////////////////////// // sg14::_impl::shift_left // performs a shift operation by a fixed number of bits avoiding two pitfalls: // 1) shifting by a negative amount causes undefined behavior // 2) converting between integer types of different sizes can lose significant bits during shift right // Exponent == 0 template<int exp, class Output, class Input> constexpr Output shift_left(Input i) { using larger = typename std::conditional< digits<Input>::value<=digits<Output>::value, Output, Input>::type; return (exp>-std::numeric_limits<larger>::digits) ? static_cast<Output>(_impl::scale<larger>(static_cast<larger>(i), 2, exp)) : Output{0}; } //////////////////////////////////////////////////////////////////////////////// // file-local implementation-specific definitions namespace fp { namespace type { //////////////////////////////////////////////////////////////////////////////// // sg14::_impl::fp::type::pow2 // returns given power of 2 template<class S, int Exponent, enable_if_t<Exponent==0, int> Dummy = 0> constexpr S pow2() { static_assert(std::numeric_limits<S>::is_iec559, "S must be floating-point type"); return S{1.}; } template<class S, int Exponent, enable_if_t<!(Exponent<=0) && (Exponent<8), int> Dummy = 0> constexpr S pow2() { static_assert(std::numeric_limits<S>::is_iec559, "S must be floating-point type"); return pow2<S, Exponent-1>()*S(2); } template<class S, int Exponent, enable_if_t<(Exponent>=8), int> Dummy = 0> constexpr S pow2() { static_assert(std::numeric_limits<S>::is_iec559, "S must be floating-point type"); return pow2<S, Exponent-8>()*S(256); } template<class S, int Exponent, enable_if_t<!(Exponent>=0) && (Exponent>-8), int> Dummy = 0> constexpr S pow2() { static_assert(std::numeric_limits<S>::is_iec559, "S must be floating-point type"); return pow2<S, Exponent+1>()*S(.5); } template<class S, int Exponent, enable_if_t<(Exponent<=-8), int> Dummy = 0> constexpr S pow2() { static_assert(std::numeric_limits<S>::is_iec559, "S must be floating-point type"); return pow2<S, Exponent+8>()*S(.003906250); } } } } //////////////////////////////////////////////////////////////////////////////// // sg14::fixed_point<> member definitions template<class Rep, int Exponent> template<class S, _impl::enable_if_t<std::numeric_limits<S>::is_iec559, int> Dummy> constexpr S fixed_point<Rep, Exponent>::one() { return _impl::fp::type::pow2<S, -exponent>(); } template<class Rep, int Exponent> template<class S, _impl::enable_if_t<std::numeric_limits<S>::is_integer, int> Dummy> constexpr S fixed_point<Rep, Exponent>::one() { return fixed_point<S, 0>::from_data(1); } template<class Rep, int Exponent> template<class S> constexpr S fixed_point<Rep, Exponent>::inverse_one() { 
static_assert(std::numeric_limits<S>::is_iec559, "S must be floating-point type"); return _impl::fp::type::pow2<S, exponent>(); } template<class Rep, int Exponent> template<class S> constexpr S fixed_point<Rep, Exponent>::rep_to_integral(rep r) { static_assert(std::numeric_limits<S>::is_integer, "S must be integral type"); return _impl::shift_left<exponent, S>(r); } template<class Rep, int Exponent> template<class S> constexpr typename fixed_point<Rep, Exponent>::rep fixed_point<Rep, Exponent>::floating_point_to_rep(S s) { static_assert(std::numeric_limits<S>::is_iec559, "S must be floating-point type"); return static_cast<rep>(s*one<S>()); } template<class Rep, int Exponent> template<class S> constexpr S fixed_point<Rep, Exponent>::rep_to_floating_point(rep r) { static_assert(std::numeric_limits<S>::is_iec559, "S must be floating-point type"); return S(r)*inverse_one<S>(); } template<class Rep, int Exponent> template<class FromRep, int FromExponent> constexpr typename fixed_point<Rep, Exponent>::rep fixed_point<Rep, Exponent>::fixed_point_to_rep(const fixed_point<FromRep, FromExponent>& rhs) { return _impl::shift_left<FromExponent-exponent, rep>(rhs.data()); } } #endif // SG14_FIXED_POINT_DEF_H
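The "\par Examples" note above refers to an external snippets.cpp that is not part of this excerpt, so the following is a minimal sketch of what such a snippet could look like. It assumes the <sg14/fixed_point> include path named in the \headerfile tag and uses only what this header defines (construction from a floating-point value, conversion back, and data()); the arithmetic operators live in other headers and are deliberately not used here.

#include <cstdint>
#include <iostream>
#include <sg14/fixed_point>

int main()
{
    // int8_t rep with exponent -4: 1 sign bit, 3 integer bits and 4 fractional bits.
    sg14::fixed_point<std::int8_t, -4> x{3.25};

    std::cout << static_cast<float>(x) << '\n';       // prints 3.25
    std::cout << static_cast<int>(x.data()) << '\n';  // prints 52, i.e. 3.25 * 2^4
}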
<gh_stars>0 use ::geo::{Point, Size}; pub enum Origin { Center, TopLeft, TopRight, BottomLeft, BottomRight, } enum Side { Top(f32), Bottom(f32), Left(f32), Right(f32), } struct SideCollection { top: f32, bottom: f32, left: f32, right: f32 } impl SideCollection { pub fn new(top: f32, bottom: f32, left: f32, right: f32) -> Self { Self { top, bottom, left, right } } } // MASK // pub struct Mask { pub point: Point, pub size: Size, pub origin: Origin, } impl Mask { pub fn new(x: f32, y: f32, w: f32, h: f32, origin: Origin) -> Self { Self { point: Point { x, y }, size: Size { w, h }, origin } } pub fn point(&self) -> &Point { &self.point } pub fn point_mut(&mut self) -> &mut Point { &mut self.point } pub fn set_point(&mut self, point: Point) { self.point = point; } pub fn rectangle(&self) -> ::ggez::graphics::Rect { let top_left: Point = self.top_left(); return [ top_left.x, top_left.y, self.size.w, self.size.h ].into(); } pub fn intersects(&self, mask: &Mask) -> bool { let self_sides: SideCollection = self.sides(); let othr_sides: SideCollection = mask.sides(); return ( ( ( self_sides.left >= othr_sides.left && self_sides.left <= othr_sides.right ) || ( self_sides.left <= othr_sides.left && self_sides.right >= othr_sides.left ) ) && ( ( self_sides.top >= othr_sides.top && self_sides.top <= othr_sides.bottom ) || ( self_sides.top <= othr_sides.top && self_sides.bottom >= othr_sides.top ) ) ); } fn top_left(&self) -> Point { match self.origin { Origin::Center => Point::new( self.point.x - self.size.w / 2.0, self.point.y - self.size.h / 2.0 ), Origin::TopLeft => Point::new( self.point.x, self.point.y ), Origin::TopRight => Point::new( self.point.x - self.size.w, self.point.y ), Origin::BottomLeft => Point::new( self.point.x, self.point.y - self.size.h ), Origin::BottomRight => Point::new( self.point.x - self.size.w, self.point.y - self.size.h ) } } fn side(&self, side: char) -> f32 { let top_left: Point = self.top_left(); return match side { 't' => top_left.y, 'b' => top_left.y + self.size.h, 'l' => top_left.x, 'r' => top_left.x + self.size.w, _ => panic!["geo::Mask::side() expected one of 't', 'b', 'l', or 'r'"], }; } fn sides(&self) -> SideCollection { SideCollection::new( self.side('t'), self.side('b'), self.side('l'), self.side('r') ) } }
The Republican National Committee changed leadership Thursday and that means a change in the fandom of the new RNC chair as well. At least at the RNC offices, it means out with the Green Bay Packers and in with the Detroit Lions.

With Wisconsin native – and former RNC chair Reince Priebus – headed to the White House on Friday to become the new chief of staff, his job was open. And on Thursday, the RNC chose Ronna Romney McDaniel to be the new chairwoman of the RNC. And in her introductory speech, that reportedly meant at least one reference to the Lions and the Packers.

New RNC chair Ronna Romney McDaniel: "We can't wait to move all the Packers stuff out of the RNC and move all the Lions stuff into the RNC." — Ryan Struyk (@ryanstruyk) January 19, 2017

Priebus is a Packers fan and apparently McDaniel, who was the chairwoman of the Michigan Republican Party before her election Thursday, is a Lions fan. She posted a picture from the tunnel at Ford Field prior to the Lions-Packers game in Week 17. The Packers beat the Lions, 31-24, to clinch the NFC North that night.

While both teams made the playoffs, the Packers are playing for the NFC Championship on Sunday in Atlanta. The Lions lost their ninth straight playoff game in the wild-card round, losing 26-6 at Seattle.
def to_string(self, history=True, screen=True, remove_blank_end=True, formatter=None):
    lines = []
    if history:
        lines.extend(map(self.fixup_line, self.history))
    if screen:
        lines.extend(map(self.fixup_line, self.main_screen))
    if not lines:
        return
    if remove_blank_end:
        lines = self.drop_end(None, list(lines))
    if formatter is None:
        formatter = self.formatter
    return formatter.format(lines)
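Since `formatter` is duck-typed here (anything exposing a `format(lines)` method works), a minimal stand-in can be passed in for testing. The class below is a hypothetical example, not part of the original codebase, and the usage line assumes `term` is an instance of whatever class defines `to_string`.

```python
class PlainFormatter:
    """Joins already-cleaned lines with newlines; usable as the `formatter` argument."""
    def format(self, lines):
        return "\n".join(str(line) for line in lines)

# Hypothetical usage:
# text = term.to_string(history=True, screen=False, formatter=PlainFormatter())
```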
Sen. Mike Lee spoke at the Heritage Foundation about the steps needed to develop a conservative reform agenda that will appeal to the broader electorate. Sen. Lee couches his proposals in “moderate” or even left-of-center terminology. In contrast, his actual policy proposals are right-of-center, small-government ideas.

As DrewM pointed out at the Ace of Spades HQ:

“I’ve argued before in various places that conservatives need candidates who are better liars. Democrats run candidates in red/purple states that talk a reasonable game back home but when they get to DC, they vote the Reid/Pelosi/Obama line without fail. The GOP on the other hand has a nasty habit of running candidates that talk a very conservative game and then vote like moderates when it counts. Conservatives need to find a way to flip that calculus within the GOP if the marriage is to survive.

“One reason I think conservative voters want their candidates to stake out the most rightward position possible and try and hold them to it is they often distrust a candidate who sounds moderate to actually be conservative when push comes to shove.”

As much as an honest appeal to the virtues of limited government ought to be a winning position, in reality it isn’t. People want a proactive government and respond positively to calls to “do something!” They want the government and the people they elect to care about them. For example, if you say you want the government to regulate something less, people will be aghast. Why? Because the government isn’t stopping bad stuff from happening! However, if you say that you want the government to liberate small businesses and innovators by eliminating barriers caused by unnecessary red tape, people will be far more receptive. As off-putting as that may sound to those who believe on principle that smaller government than what we have is a good in and of itself, it is the type of mindset that we must appeal to if we are to win and actually start turning the statist Progressive onslaught against us.

DrewM further notes:

“I think my ambivalence about the speech, in particular the transportation example (which is a stand-in for how to deal with other policy issues, talk big, act small) is that I want someone to have a national fight about the role and scope of government. I want to take the ObamaCare debacle and discredit the notion of big government paternalism for the ages! In short I want to crush the Democrats, see them driven before us, and to hear the lamentation of their women.

“Lee’s approach isn’t as satisfying as Rand Paul’s slash and burn approach (which Lee supported) but it’s likely to be more politically effective.”

Therein lies the problem. An incremental approach couched in the language of the Progressives could win, but it requires trust in those who run and govern under this strategy. The Progressives have faith in their dialectic. Conservatives don’t have faith in the Republican Party or “the Establishment.”
package dnsserver import ( "net" "strings" "github.com/coredns/coredns/plugin" "github.com/miekg/dns" ) type zoneAddr struct { Zone string Port string Transport string // dns, tls or grpc IPNet *net.IPNet // if reverse zone this hold the IPNet } // String return the string representation of z. func (z zoneAddr) String() string { return z.Transport + "://" + z.Zone + ":" + z.Port } // Transport returns the protocol of the string s func Transport(s string) string { switch { case strings.HasPrefix(s, TransportTLS+"://"): return TransportTLS case strings.HasPrefix(s, TransportDNS+"://"): return TransportDNS case strings.HasPrefix(s, TransportGRPC+"://"): return TransportGRPC } return TransportDNS } // normalizeZone parses an zone string into a structured format with separate // host, and port portions, as well as the original input string. func normalizeZone(str string) (zoneAddr, error) { var err error // Default to DNS if there isn't a transport protocol prefix. trans := TransportDNS switch { case strings.HasPrefix(str, TransportTLS+"://"): trans = TransportTLS str = str[len(TransportTLS+"://"):] case strings.HasPrefix(str, TransportDNS+"://"): trans = TransportDNS str = str[len(TransportDNS+"://"):] case strings.HasPrefix(str, TransportGRPC+"://"): trans = TransportGRPC str = str[len(TransportGRPC+"://"):] } host, port, ipnet, err := plugin.SplitHostPort(str) if err != nil { return zoneAddr{}, err } if port == "" { if trans == TransportDNS { port = Port } if trans == TransportTLS { port = TLSPort } if trans == TransportGRPC { port = GRPCPort } } return zoneAddr{Zone: dns.Fqdn(host), Port: port, Transport: trans, IPNet: ipnet}, nil } // Supported transports. const ( TransportDNS = "dns" TransportTLS = "tls" TransportGRPC = "grpc" )
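For readers skimming the parsing rules, here is a short Python sketch of the same prefix-stripping and default-port behavior. The port numbers are assumptions used only for illustration; the real values come from the `Port`, `TLSPort`, and `GRPCPort` constants defined elsewhere in CoreDNS.

```python
DEFAULT_PORTS = {"dns": "53", "tls": "853", "grpc": "443"}  # assumed defaults for illustration

def split_transport(s: str):
    """Return (transport, rest) by stripping an optional scheme prefix; defaults to dns."""
    for transport in ("tls", "dns", "grpc"):
        prefix = transport + "://"
        if s.startswith(prefix):
            return transport, s[len(prefix):]
    return "dns", s

transport, rest = split_transport("tls://example.org")
port = DEFAULT_PORTS[transport]  # only applied when the address itself carries no port
print(transport, rest, port)     # tls example.org 853
```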
{-# LANGUAGE CPP #-}
{-# LANGUAGE OverloadedStrings #-}

-- | This module provides constants for the server protocol.
module ClickHouseDriver.Core.ServerProtocol where

import Data.ByteString (ByteString)
import Data.Vector (Vector, fromList, (!?))

-- Name, version, revision
_HELLO :: Word
_HELLO = 0 :: Word

-- A block of data
_DATA :: Word
_DATA = 1 :: Word

-- The exception during query execution
_EXCEPTION :: Word
_EXCEPTION = 2 :: Word

-- Query execution progress: rows read, bytes read
_PROGRESS :: Word
_PROGRESS = 3 :: Word

-- Ping response
_PONG :: Word
_PONG = 4 :: Word

-- All packets were transmitted
_END_OF_STREAM :: Word
_END_OF_STREAM = 5 :: Word

-- Packet with profiling info
_PROFILE_INFO :: Word
_PROFILE_INFO = 6 :: Word

-- A block with totals
_TOTAL :: Word
_TOTAL = 7 :: Word

-- A block with minimums and maximums
_EXTREMES :: Word
_EXTREMES = 8 :: Word

-- A response to a TablesStatus request
_TABLES_STATUS_RESPONSE :: Word
_TABLES_STATUS_RESPONSE = 9 :: Word

-- System logs of the query execution
_LOG :: Word
_LOG = 10 :: Word

-- Columns' description for default values calculation
_TABLE_COLUMNS :: Word
_TABLE_COLUMNS = 11 :: Word

typeStr :: Vector ByteString
typeStr =
  fromList
    [ "Hello",
      "Data",
      "Exception",
      "Progress",
      "Pong",
      "EndOfStream",
      "ProfileInfo",
      "Totals",
      "Extremes",
      "TablesStatusResponse",
      "Log",
      "TableColumns"
    ]

toString :: Int -> ByteString
toString n = case typeStr !? n of
  Nothing -> "Unknown Packet"
  Just t -> t
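As a quick cross-check of the table above, the code-to-name lookup with its "Unknown Packet" fallback can be sketched in Python; this is an illustration only, the driver itself uses the Haskell `toString` shown above.

```python
PACKET_TYPE_NAMES = [
    "Hello", "Data", "Exception", "Progress", "Pong", "EndOfStream",
    "ProfileInfo", "Totals", "Extremes", "TablesStatusResponse", "Log", "TableColumns",
]

def packet_type_name(code: int) -> str:
    # Mirrors the (!?) safe lookup: out-of-range codes map to the fallback string.
    return PACKET_TYPE_NAMES[code] if 0 <= code < len(PACKET_TYPE_NAMES) else "Unknown Packet"

assert packet_type_name(4) == "Pong"
assert packet_type_name(99) == "Unknown Packet"
```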
The Rise And Fall Of Sloppy We knew going into the 2017 season that the year would be one of change here at Parsimonious Racing. We took on a new co-driver in Trevor, and made the move from STS to STR. The weapon of choice was a 2008 Mazda MX-5 named "Sloppy". Sloppy's path to us was not direct. It first showed up at a WDCR autocross with a For Sale sign on it, and it was cheap. It also had a dented front fender, a roll bar installed and a good chunk of the rear interior plastics missing. It also had brown seats and a tan top, so I was completely uninterested. Danny Kao, though, was, and he wound up buying it. Danny replaced the dented fender, removed the roll bar and sourced most of the interior pieces. He then autocrossed it. On stock shocks with lowering springs, he ran close enough to Brian Karwan's times at a PCA event at Hershey Park that he started thinking about diving deeper into a real STR build. This meant acquiring coilovers, sway bars, intake, header, tune, and lots of other go-fast bits that might be necessary to make the car at least semi-competitive. Staring down that cash outlay, Danny did the smart thing and put the car up for sale. It was still cheap, but I still ignored it because it was a NC with brown seats and a tan interior. But when Kate called me from work one day and said she wanted the car, well, it's not like I could say no. That's when we became the proud owners of a cheap MX-5. The car came with many parts, both stock stuff that was taken out and upgrades that were bought but not installed. Among those were nearly new Bilstein shocks and a Progress front sway bar. Since we had those, we figured we might as well install them. We invited ourselves over to Nick's "Goober Garage" to put them on there, since he had a Quickjack and, more importantly, experience with NCs. He set up a get-together for us car nerds on Facebook, with the name "Come fix Danny's Sloppy Seconds." And that's where the car earned its name. With the Bilsteins, the car worked well. The following weekend was the WDCR night autocross. It was an entertaining car under those circumstances, though hampered by a low rev limiter and poor alignment. On runs where I remembered to turn off the traction control, the car was a lot of fun to toss around, and not completely uncompetitive. This was encouraging. Kate couldn't drive the car for quite some time because she had one of her many ankle operations. I started driving the car off and on just to keep the rotors from getting rusty. The problem was, the more I drove it, the more I liked it. Then the slippery slope began. I wanted more low, and called Sam Strano to see if anyone made coilover sleeves for those Bilstein shocks. He said that such a thing did exist, but, he added, "you'll hate it." I asked how to lower the car more than it was without turning the car into something I'd hate. His answer: Ohlins. A week later, those coilovers arrived. Around the same time, I found a set of RPF1s in the chrome-like SBC finish. Over the winter, we continued with the plan to run STS primarily with Captain Slow, our '90 Miata. But I didn't rule out running Sloppy in STR off and on if I decided to make a change of pace. That happened more often than I expected. It started in May, when me and Sloppy took on the Cars & Cones Road Trip, a five-day odyssey that saw me run four autocrosses in three states with a group that was geared more towards putting on events for the muscle car/pro touring crowd. 
Sloppy was very good in that crowd, putting up a couple top-five finishes while also eating up all those interstate highway miles going from site to site. We also took on the Tail of the Dragon on the off-day, having an absolute epic time despite drizzly weather. Sloppy got its first serious accolades in July. I took the car to "Cruisin' Connecticut", which offered the chance at doing autocross on the kart track in Lime Rock Park's infield. We ran practice for an hour or so before timed runs, and we were told to watch our mirrors for those going faster and give way, and/or to back off when catching slower cars. I dutifully watched my mirrors, but nobody was catching me. On the other hand, I was running up on everyone else quite a bit. Now there were a lot of people who had never done any real performance driving, and a lot of cars that were very stock or not on the latest and greatest tires like I was. In timed runs, Sloppy was the fastest car there, with the second-best car over three seconds slower. Through the rest of 2016, Sloppy came out to play more often than I thought it would. We stayed with Slow for serious SCCA stuff, but for the less-serious stuff, the NC ran a lot more than the NA. It was still fairly competitive in those smaller clubs despite it being, essentially, a stock car with coilovers and wide wheels/tires. Fast forward to the 2016 SCCA Solo Nationals. At one of the banquets was me, Kate, Trevor, and Trevor's dad Bryan. We were just shooting the breeze, thinking about what we might be doing the next year, and somehow we all decided that we were going to team up and run Sloppy in STR Sloppy was going to need more work before it turned into a real STR car. We had adequate suspension, though softly-sprung compared to what the rest of the class was running. The biggest opportunities were under the hood. Moto-East provided an intake, header, and a remote tune. I installed the intake quickly, since it was a fairly easy task. I procrastinated on the header install, since it was a lot more involved, but eventually that job was done as well. When I configured the tune initially, I thought I had them set the rev limiter to 7500 RPM, which would be a nice upgrade over the stock 7000. But when I tested that at Spring Nationals, it didn't seem to translate into very much 2nd gear speed over stock. And that showed up on course, as we were bububububububububub all over the courses in both the Pro and the Tour. This didn't seem to hold the car back in the results, as Trevor made the Bonus Challenge and got all the way to the final pairing before losing, and special guest driver Shane ran third in STR in the Tour's first day before falling to 4th on the second day. But I thought that limiter was still holding us back. Through questionable internet research, I had convinced myself that I could probably get away with running the car all the way to 7800 RPM. There were certainly those who were skeptical that the engine would take that abuse, but I took my chances. The car was a fairly low-mile creampuff and healthy, so I rolled the dice and had the limiter cranked up while the car was dyno tuned at PTuning. The first time out with the new tune was the Toledo Pro Solo. I took the car out onto the street on Friday and figured out the launch control, and we got through practice starts just fine using that. Kate got through her Saturday morning L3 runs in one piece, and next up was Trevor in STR. He did OK in his first session, but brought the car back sputtering after his last run. 
We shut the car down and restarted everything to see if I could get runs in, but I only got one run in before deciding to park the car as it was definitely not running right. Initial diagnosis in impound was that what happened to the car had happened a few years earlier to James Dunham's NC. As it was explained to me, a sensor ring on the intake camshaft had slipped, and the car wasn't timed right. The likely fix was to delve into the cylinder head and replace the camshaft itself. With that sobering news, we loaded up the car, found other cars to jump into to finish the weekend, and think about dealing with the car after getting it home. I let the car sit on the trailer for the better part of two weeks before even thinking of doing something with it. Finally I summoned up the courage to at least ask some people for advice. A mechanic friend said that if I pulled the cam cover and timing covers, he'd come by and diagnose it. But I didn't want him making what would have been a pretty good haul on his time off. I took a ride up to York Automotive in Mount Airy and asked Ed about it. He asked if the car had thrown any codes. I said yes, the cam angle sensor code. He said, "change the sensor, what can it hurt?" I was pessimistic, but willing to make the investment on a small chance of this getting the car back running again. It's an easy sensor to change, and I hopped in the car to see if I was going to be lucky. It fired right up, but this was no indicator as it had started and idled fine even at its worst. When giving it gas is when it would stumble. But as I goosed the gas, the car revved without hesitation over and over. I was elated. My reports of Sloppy's demise being premature were met with pessimism. Yes, they said, it's fixed, but that doesn't mean it's going to be better for the long haul. Chances are, I was told, that if I were to start banging the car off the rev limiter in the future, I would probably be in the same boat with the fried sensor. I could also be staring down the camshaft replacement that I thought I'd successfully avoided. Half of me thought that we could possibly avoid future complications by going back to the tuner and have them lower the rev limiter to a more safe 7500 RPM, and set it so it had a "soft" rev limiter rather than the rather harsh limiter in it now. The soft limiter would lower the shock to the entire driveline, perhaps to the point where the (possibly) damaged camshaft would remain in its working-for-now shape for the foreseeable future. The more that I thought about this, though, the more that the realization that the NC1 just wasn't the right tool for the job at hand. If we tuned the engine for maximum - relatively speaking - reliability, we'd be back to where we were at Spring Nationals, where we were on that limiter for over half of the course on each run. That's not ideal, it's not fast, and it's frustrating while driving. Knowing this, the prudent thing to do was to retire Sloppy from competition. If it never sees the rev limiter again, it could theoretically run forever as a fun little commuter. This left Parsimonious Racing in a tough spot. What next? Go back to STS with the old car and rent-a-ride for the Pro Finale? Find a new car to build in a short time? We'll cover that the next time we put pen to paper here. Return To Front Page Parsimonious Racing News Archive
Endogenous physical regulation of population density in the freshwater protozoan Paramecium caudatum Studies confirm physical long-range cell-cell communication, most evidently based on electromagnetic fields. Effects concern induction or inhibition of cell growth. Their natural function is unclear. With the protozoan Paramecium caudatum I tested whether the signals regulate cell density and are electromagnetic. Up to 300 cells/mL, cell growth in clones of this study is decreasingly pronounced. Using cuvettes as chemical barriers enabling physical communication I placed 5 indicator cells/mL, the inducer populations, into smaller cuvettes that stand in bigger and contained 50, 100, 200 or 300 cells/mL. Under conditions of total darkness such pairs were mutually exposed for 48 hours. The hypothesis was that indicator cells, too, grow less the more neighbor cells there are. The bigger inducer populations were in the beginning the less they grew. The indicator populations grew accordingly; the more cells they were surrounded by the less they grew. The suppressing neighbors-effect disappeared when inner cuvettes were shielded by graphite known to shield electromagnetic radiation from GHz to PHz, i.e. to absorb energy from microwaves to light. These are the first results demonstrating non-contact physical quorum sensing for cell population density regulation. I assume rules intrinsic to electromagnetic fields interacting with matter and life. How does a population of cells in a multicellular organism maintain its cell density? Basic understanding is coming from studies with unicellular organisms where cells release chemical signals in dependence of cell density leading to a corresponding regulation of the cell cycle . Beside this molecule-based, i.e. chemical quorum sensing, there is also (contact-based) physical quorum sensing reported in the context of dispersal rates 4 . Further, effects among cells on cell cycle occur also through glass or quartz barriers where chemical signaling is prevented . These effects seem to be governed by physical factors, too, most evidently electromagnetic (EM) signals generated by the cells themselves 8,9 , enabling -due to their physical nature -to cross glass or quartz barriers 10 . In the present study, this seemingly non-chemical cell-to-cell communication was further investigated as its possible biological role was analyzed. The study organism was the freshwater protozoan Paramecium caudatum, as already used in previous studies by the present author showing intraspecific 11 as well as interspecific 12 effects from one population on the other across chemical barriers. One possible function of this non-chemical cell communication in Paramecium caudatum is assumed to be the regulation of population density. I distinct here the main-hypothesis of density regulation due to a physical signal that can trespass chemical barriers and the sub-hypothesis that these physical signals are of electromagnetic nature. Paramecium caudatum used in this study are maintained in microcosms where they reach an average density of about 300 cells/mL. Even though they receive food (bacteria) ad libidum and each cell occupies only about 0.1% of the average volume available for each cell one never finds the cells overshooting that particular density. In the present study I wanted to find out whether physical signals are used to regulate the cell density in Paramecium caudatum. For this, I placed cells at four different densities, (i.e., 50, 100, 200 and 300 cells per mL) into cuvettes. 
It is expected that low-density populations would still grow during the course of the experiment whereas populations with higher densities would have a slower growth, and those at carrying capacity would stop growing -or just stay in a balance between cell mortality and division. Yet, what if we place smaller cuvettes containing 5 cells (per mL) into those above-mentioned populations? Would these 5 tester cells act as indicators in that they would grow in dependence on the density of the population they are surrounded by? The hypothesis of a physical (presumably electromagnetic) cell density regulation is based on the conclusion that the bigger the outer population is at the onset of the experiment the lower would not only be their own growth but also the lower would be the growth of those 5 original (indicator) cells placed inside. This was assumed to be so because the presumed endogenous (electromagnetic) signal with which the inducer population regulates its own density trespasses to the inner cuvette, where the tester population adopts its growth decision accordingly, hence indicating that the big inducer population can regulate its density via non-chemical cell-to-cell communication. The results of this study deliver strong evidence for a physical regulation of population density: across chemical barriers the five inner cells grew less, the more neighbors they had. This indicates that the inducer populations released a signal that regulates cell density. When shielding against presumed electromagnetic signals, the growth decreasing effect from neighboring populations disappeared, supporting the assumption that an endogenous electromagnetic signal was transmitted from the outer inducer population to the inner tester population. Results Experiment 1: Density-effect. The inducer populations showed a highly significant negative relation between initial population size and cell division rate (ANOVA (linear fit): DF = 1; SS = 16.257; F-ratio = 610.6; p > F = 0.0001****) with the biggest populations (300 cells at origin) performing no more growth ( Fig. 1(b)). Cell division rates of tester cells and controls had similar mean values (ANOVA (linear fit): DF = 1; SS = 0.007; F-ratio = 0.148; p > F = 0.702 ns). In either of the above tests there was a highly significant effect from repeating the experiment (Tab 1), but no material effects due to different cuvette material (statistics not shown) which latter allowed to merge the glass and quartz treatment groups which leading then to n = 16 per treatment group. Regarding the working hypothesis, there was a strong significant negative effect between initial densities of inducer populations and cell division rates of the tester populations (Table 1, Fig. 1(c)). Cases of mortality in the inducer populations were omitted in the above analysis because this adds the variable mortality known for tremendous release of endogenously generated photons 13 . Mortality was assessed in 10 inducer populations with originally 300 cells/mL (8 cases) and 200 cells/mL (2 cases). While tester populations were positively correlated with inducer populations when the latter were growing (ANOVA (linear fit): DF = 1; SS = 0.870; F-ratio = 8.014; p > F = 0.0066**), note that they were negatively correlated when inducer populations displayed mortality i.e., tester populations grew significantly better the higher the mortality rate in the inducer populations (ANOVA (linear fit): DF = 1; SS = 0.490; F-ratio = 9.007; p > F = 0.017*). Experiment 2: Graphite-shielding. 
Tester populations that were shielded with graphite from inducer populations grew significantly better (ANOVA (linear fit): DF = 1; SS = 0.774; F-ratio = 14.437; p > F = 0.0014**) than those not shielded ( Fig. 1(e)). There was no difference in growth of inducer populations due to exposure to the graphite-layer on the outside of the inner cuvette (statistics not shown). Discussion The results of the present study provide strong evidence for a physical signal organizing the regulation of population density in the unicellular organism Paramecium caudatum. This physical signal is most probably of electromagnetic nature since the effect disappeared when shielding tester populations against electromagnetic signals assumed to be emitted by inducer populations. Further, under the shielding conditions, presumed volatiles (i.e., volatile chemical compounds mediating long-range cell-to-cell signaling) could still have induced an effect but, such an effect was absent. The absence of volatiles in the experimental system is extensively discussed elsewhere 11 . Since the results indicate that cells can sense the quorum, i.e., the number of cells the population consists in, I suggest here to interpret this study as evidence for a non-contact physical (most probably electromagnetic) quorum sensing. Despite the confirmation of the initial hypothesis, the study delivered two unexpected results: (i) The absence of effects from different material types (normal glass or quartz glass) of separating cuvettes was opposed to a previous study with the same cell system 11 . It suggests that during this experiment the inducing signals were in the wavelength-range that could be transmitted through both, glass and quartz cuvettes. Such wavelengths refer to the UV-A range and below. (ii) The effects coming from inducer populations that began to decrease in cell number during the experiment and were inducing a growth increase response in tester populations were an interesting side observation of the experiment and demands an intense investigation on effects of mortality on cell division rates and regulation, respectively. The major effects described in this study deliver indirect evidence that cells use signals that offshoot from the cellular generation of electromagnetic fields 8,14,15 . Surely, direct evidence is desirable, but as we are not technically able to (i) catch, (ii) store and (iii) release at will electromagnetic waves released by one subpopulation and emitted to another, the here-mentioned method for testing non-chemical cell-to-cell communication is in principal (still) the best; for a detailed discussion about this aspect please refer to Fels 10 . To get more information about the true nature of the signals, the graphite-shielding method is useful but delivers only indirect evidence. For direct evidence, therefore, the use of multimode optical fibers with a transmission window allowing optical communication is planned. The significance of this study is the support of the assumption that the non-avoidable endogenous generation of electromagnetic fields plays an active role in cell dynamics. This supports the basic hypothesis that the endogenous fields of the cell feedback on cell components some of which having generated these fields 16 . The work is, in addition, mutually supportive to giving credit to works on electrostatic or electromagnetic induction on cell processes being exogenous 17-20 or endogenous 21-24 . The results demand to give more attention to the physics of the cell. 
This may also include to look at effects from physics-based non-invasive therapies and basic research leading to such forms of therapy, respectively 25 . This, however, means that we cannot exclude the possibility that life's inner organization is also driven by rules intrinsic to electromagnetic fields interacting with matter and life. Materials and Methods The study organism. Paramecium caudatum (Phylum: Ciliata) is a freshwater protozoan of the Eurasian plate and a common research organism in different biological fields such as ecology and cell behaviour 26,27 or host-parasite interactions 28 . They are large cells with a length of about 250 µm and width of about 50 µm. In this study, I work with Paramecia that are maintained in an incubator with a 24 hrs cycle of 15 hrs artificial light at 23 °C and 9 hrs darkness at 18 °C. Once per week the densities of the cells were assessed with the use of a binocular microscope, glass wells and a hand counter. The cells were thereafter fed with a medium that contains the prokaryote Serratia sp. (on which Paramecia feed) and dried Lettuce (Lactuca sativa var. capitata) leaves (on which the prokaryotes feed). Under such conditions, different clones maintain clone specific densities. In this study, the clone K8 was used with an average maximum cell density, i.e., carrying capacity of 300 cells/mL (for more information refer 29 ). The glass-barrier method. The use of glass-barriers (i.e., cuvettes) goes back to the original works of Alexander Gurwitsch 5 who employed it to prevent transmission of chemical -but not electromagnetic -signals from one population to another, and was used in many variations ever since . In the present study two populations were separated from each other by using two sizes of cuvettes (vials). Big cuvettes with a base of 23 mm × 23 mm and a height of 40 mm, and small cuvettes with a base of 15 mm × 15 mm and a height of 45 mm the thickness of the walls being 1.5 mm (see Fig. 1(a)). The cuvettes consist either of normal glass (which is a filter for electromagnetic radiation in the optical ultra-violet (UV) spectrum, i.e., UV-B and UV-C ) or quartz glass allowing transmission of the whole UV range; for glass and quartz transmission spectra see elsewhere 11 . The separation of two cell populations. In order to separate two cell populations of Paramecium caudatum, one is placed in a small cuvette and the other in a bigger cuvette (for a detailed description see 10 ). The smaller cuvette is then placed into the big cuvette, leading to a chemical separation of these two populations ( Fig. 1(a)). Under the conditions of the experiment each cuvette contains 1 mL of medium (as described above) leading to a height of about 6 mm of medium in the inner and of about 4 mm in the outer cuvette (when containing a small inner cuvette). Such pairs of cuvettes (referred to as "units") are then randomly placed in a grid ( Fig. 1(d)); note that units were separated by black carbon paper disabling light transfer from one unit to the other. The grid itself stands in a lightproof box leading to total-darkness conditions during the experiment(s). Experiment 1: Density-effect. This experiment tested for a differing effect on tester populations due to differing cell densities in neighbouring inducer populations. At the beginning of an experiment the inducer populations contained either 50, 100, 200 or 300 cells/mL. These densities were obtained by diluting the well-grown clones -reaching population sizes of 300 cells/mL -with medium. 
The inducer populations were in the outer cuvettes while the tester populations were in the inner cuvettes. The tester populations consisted at the beginning of the experiment always of 5 (individually and randomly picked) cells. A control was added with no inducer population but 1 mL medium in the outer cuvette. The inner and outer cuvettes consisted either both of normal glass or both of quartz glass. An experimental block contained a random design ( Fig. 1(d)) of two replicates in a total of ten treatment groups and was kept for 48 hrs under conditions of total darkness in an incubator at (constant) 26 °C before assessing the growth of the cells. Note, Paramecia grow well at room temperature below 30 °C (26). The experiment itself was repeated four times leading to a sample size of n = 8 per treatment group (as there were no effects from separating material glass or quartz found , data could be merged leading to n = 16). Experiment 2: Graphite-shielding. Shielding is a commonly used method when looking for electromagnetic effects between organisms or cells 30,33,34 . If the signal in the Paramecium caudatum system is electromagnetic, then a thin layer of colloidal graphite around the inner cuvette should prevent electromagnetic signals 35 coming from the outer inducer population that could induce an effect on the inner tester population. Using purest colloidal graphite in solution (CRAMOLIN ® GRAPHIT) a graphite-layer was twice sprayed onto the bottom and up to a height of 15 mm on the outer side of the small cuvettes. Graphite has the capability of strongly decreasing the transmission of electromagnetic signals; it is known to shield electromagnetic field efficiently in the range of radiofrequency/microwave region up to wavelengths in the light spectrum, i.e. from GHz to PHz . The experiment consisted of five cells in the inner (small) cuvettes surrounded by 100 neighbouring cells and separated from each other either with or without an additional graphite-shield. Only quartz cuvettes were in use. An experimental block consisted of five replicates of each treatment group arranged in a random design. Each block was kept for 48 hrs under conditions of total darkness in an incubator at constantly 26 °C before assessing the growth of the cells. The experiment was repeated two times leading to a sample size of n = 10 per treatment group. Analysis. All data were log-transformed and analysed with JMP statistics 39 .
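The statistics reported above were produced in JMP, which is not scripted here. As an illustration of the core test, a linear fit of cell division rate against initial inducer density, the following Python sketch runs the equivalent single-predictor regression on made-up numbers. The division-rate formula is an assumption (the paper does not spell out how rates were computed); log2 of the fold change per day is one common choice.

```python
import numpy as np
from scipy import stats

# Hypothetical example values, NOT the study's data: initial inducer density (cells/mL)
# and tester-population size after the 48 h exposure (tester populations start at 5 cells/mL).
initial_density = np.array([50, 50, 100, 100, 200, 200, 300, 300], dtype=float)
tester_final = np.array([22, 19, 17, 15, 11, 12, 8, 7], dtype=float)

# Assumed definition: divisions per day = log2(final / initial) / exposure time in days.
division_rate = np.log2(tester_final / 5.0) / 2.0

fit = stats.linregress(initial_density, division_rate)
print(f"slope = {fit.slope:.5f} divisions/day per cell/mL, "
      f"p = {fit.pvalue:.4f}, R^2 = {fit.rvalue ** 2:.3f}")
```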
/** * @version $Rev$ $Date$ */ public class StaticJndiContextPlugin implements AppClientPlugin { private final Context context; public StaticJndiContextPlugin(Map context, Kernel kernel, ClassLoader classLoader) throws NamingException { // create ReadOnlyContext for (Iterator iterator = context.values().iterator(); iterator.hasNext();) { Object value = iterator.next(); if (value instanceof KernelAwareReference) { ((KernelAwareReference) value).setKernel(kernel); } if (value instanceof ClassLoaderAwareReference) { ((ClassLoaderAwareReference) value).setClassLoader(classLoader); } } this.context = EnterpriseNamingContext.createEnterpriseNamingContext(context); } public void startClient(ObjectName appClientModuleName, Kernel kernel, ClassLoader classLoader) throws Exception { RootContext.setComponentContext(context); System.setProperty("java.naming.factory.initial", "com.sun.jndi.rmi.registry.RegistryContextFactory"); System.setProperty("java.naming.factory.url.pkgs", "org.apache.geronimo.naming"); // System.setProperty("java.naming.provider.url", "rmi://localhost:1099"); new InitialContext().lookup("java:comp/env"); } public void stopClient(ObjectName appClientModuleName) throws Exception { RootContext.setComponentContext(null); } public static final GBeanInfo GBEAN_INFO; static { GBeanInfoBuilder infoFactory = GBeanInfoBuilder.createStatic(StaticJndiContextPlugin.class); infoFactory.addAttribute("context", Map.class, true); infoFactory.addAttribute("kernel", Kernel.class, false); infoFactory.addAttribute("classLoader", ClassLoader.class, false); infoFactory.addInterface(AppClientPlugin.class); infoFactory.setConstructor(new String[]{"context", "kernel", "classLoader"}); GBEAN_INFO = infoFactory.getBeanInfo(); } public static GBeanInfo getGBeanInfo() { return GBEAN_INFO; } }
Capacity of 1-to-K Broadcast Packet Erasure Channels with Channel Output Feedback (Full Version) This paper focuses on the 1-to-K broadcast packet erasure channel (PEC), which is a generalization of the broadcast binary erasure channel from the binary symbol to that of arbitrary finite fields GF(q) with sufficiently large q. We consider the setting in which the source node has instant feedback of the channel outputs of the K receivers after each transmission. The capacity region of the 1-to-K PEC with COF was previously known only for the case K=2. Such a setting directly models network coded packet transmission in the downlink direction with integrated feedback mechanisms (such as Automatic Repeat reQuest (ARQ)). The main results of this paper are: (i) The capacity region for general 1-to-3 broadcast PECs, and (ii) The capacity region for two types of 1-to-$K$ broadcast PECs: the symmetric PECs, and the spatially independent PECs with one-sided fairness constraints. This paper also develops (iii) A pair of outer and inner bounds of the capacity region for arbitrary 1-to-K broadcast PECs, which can be easily evaluated by any linear programming solver. The proposed inner bound is proven by a new class of intersession network coding schemes, termed the packet evolution schemes, which is based on the concept of code alignment in GF(q) that is in parallel with the interference alignment techniques for the Euclidean space. Extensive numerical experiments show that the outer and inner bounds meet for almost all broadcast PECs encountered in practical scenarios and thus effectively bracket the capacity of general 1-to-K broadcast PECs with COF. I. INTRODUCTION Broadcast channels have been actively studied since the inception of network information theory. Although the broadcast capacity region remains unknown for general channel models, significant progress has been made in various sub-directions (see for a tutorial paper), including but not limited to the degraded broadcast channel models , the 2-user capacity with degraded message sets or with message side information . Motivated by wireless broadcast communications, the Gaussian broadcast channel (GBC) is among the most widely studied broadcast channel models. In the last decade, the new network coding concept has emerged , which focuses on achieving the capacity of a communication network. More explicitly, the network-codingbased approaches generally model each hop of a packet-based communication network by a packet erasure channel (PEC) instead of the classic Gaussian channel. Such simple abstraction allows us to explore the information-theoretic capacity of a much larger network with mathematical rigor and also sheds new insights on the network effects of a communication system. One such example is that when all destinations are interested in the same set of packets, the capacity of any arbitrarily large, multi-hop PEC network can be characterized by the corresponding min-cut/max-flow values , . Another example is the broadcast channel capacity with message side information. Unlike the existing GBC-based results that are limited to the simplest 2-user scenario , the capacity region for 1-to-K broadcast PECs with message side information has been derived for K = 3 and tightly bounded for general K values , . 1 In addition to providing new insights on network communications, this simple PEC-based abstraction in network coding also accelerates the transition from theory to practice. 
Many of the capacity-achieving network codes have since been implemented for either the wireline or the wireless multi-hop networks , . Motivated by the state-of-the-art wireless network coding protocols and the corresponding applications, this paper studies the memoryless 1-to-K broadcast PEC with Channel Output Feedback (COF). Namely, a single source node sends out a stream of packets wirelessly, which carries information of K independent downlink data sessions, one for each receiver d k , k = 1, · · · , K, respectively. Due to the randomness of the underlying wireless channel condition, which varies independently for each time slot, each transmitted packet may or may not be heard by a receiver d k . After packet transmission, each d k then informs the source its own channel output by sending back the ACKnowledgement (ACK) packets periodically (batch feedback) or after each time slot (perpacket instant feedback) . derives the capacity region of the memoryless 1-to-2 broadcast PEC with COF. The results show that COF strictly improves the capacity of the memoryless 1-to-2 broadcast PEC, which is in sharp contrast with the classic result that feedback does not increase the capacity for any memoryless 1-to-1 channel. can also be viewed as a mirroring result to the achievability results of GBCs with COF . It is worth noting that other than increasing the achievable throughput, COF can also be used for queue and delay management , and for rate-control in a wireless network coded system . The main contribution of this work includes: (i) The capacity region for general 1-to-3 broadcast PECs with COF; (ii) The capacity region for two types of 1-to-K broadcast PECs with COF: the symmetric PECs, and the spatially independent PECs with one-sided fairness constraints; and (iii) A pair of outer and inner bounds of the capacity region for general 1-to-K broadcast PECs with COF, which can be easily evaluated by any linear programming solver. Extensive numerical experiments show that the outer and inner bounds meet for almost all broadcast PECs encountered in practical scenarios and thus effectively bracket the exact capacity region. The capacity outer bound in this paper is derived by generalizing the degraded channel argument first proposed in . For the achievability part of (i), (ii), and (iii), we devise a new class of inter-session network coded schemes, termed the packet evolution method. The packet evolution method is based on a novel concept of network code alignment, which is the PEC-counterpart of the interference alignment method originally proposed for Gaussian interference channels , . It is worth noting that in addition to the random PEC model in this paper, there are other promising channel models that also greatly facilitate capacity analysis for larger networks. One such example is the deterministic wireless channel model proposed in , which can also be viewed as a deterministic degraded binary erasure channel. The rest of this paper is organized as follows. Section II contains the basic setting as well as the detailed comparison to the existing results in , , via an illustrating example. Section III describes the main theorems of this paper and the proof of the converse theorem. In particular, Section III-A focuses on the capacity results for arbitrary broadcast PEC parameters while Section III-B considers two special types of broadcast PECs: the symmetric and the spatially independent PECs, respectively. 
Section IV introduces a new class of network coding schemes, termed the packet evolution (PE) method. Based on the PE method, Section V outlines the proofs of the achievability results in Section III. Some theoretic implications and discussions are included in Section VI. Section VII concludes this paper. A. The Memoryless 1-to-K Broadcast Packet Erasure Channel For any positive integer K, we use ∆ = {1, 2, · · · , K} to denote the set of integers from 1 to K, and use 2 to denote the collection of all subsets of . Consider a 1-to-K broadcast PEC from a single source s to K destinations d k , k ∈ . For each channel usage, the 1-to-K broadcast PEC takes an input symbol Y ∈ GF(q) from s and outputs a K-dimensional vector Z ∆ = (Z 1 , · · · , Z K ) ∈ ({Y }∪{ * }) K , where the k-th coordinate Z k being " * " denotes that the transmitted symbol Y does not reach the k-th receiver d k (thus being erased). We also assume that there is no other type of noise, i.e., the individual output is either equal to the input Y or an erasure " * ." The success probabilities of a 1-to-K PEC are described by 2 K non-negative parameters: p S \S for all S ∈ 2 such that S∈2 That is, p S \S denotes the probability that the transmitted symbol Y is received by and only by the receivers {d k : k ∈ S}. In addition to the joint probability mass function p S \S of the success events, the following notation will be used frequently in this work. For all S ∈ 2 , we define That is, p ∪S is the probability that at least one of the receiver d k in S successfully receives the transmitted symbol Y . For example, when K = 2, is the probability that at least one of d 1 and d 2 receives the transmitted symbol Y . We sometimes use p k as shorthand for p ∪{k} , which is the marginal probability that the k-th receiver d k receives Y successfully. We can repeatedly use the channel for n time slots and let Y (t) and Z(t) denote the input and output for the t-th time slot. We assume that the 1-to-K broadcast PEC is memoryless and time-invariant, i.e., for any given function y(·) : → GF(q), which is a function f t (·) based on the information symbols {X k,j } and the COF {Z(τ ) : τ ∈ } of the previous transmissions. In the end of the n-th time slot, each d k outputs the decoded symbolŝ where g k (·) is the decoding function of d k based on the corresponding observation Z k (t) for t ∈ . Note that we assume that the PEC channel parameters p S \S : ∀S ∈ 2 are available at s before transmission. See Fig. 1 for illustration. We now define the achievable rate of a 1-to-K broadcast PEC with COF. Definition 1: A rate vector (R 1 , · · · , R K ) is achievable if for any ǫ > 0, there exist sufficiently large n and sufficiently large underlying finite field GF(q) such that ∀k ∈ , Prob X k = X k < ǫ. Definition 2: The capacity region of a 1-to-K broadcast PEC with COF is defined as the closure of all achievable rate vectors (R 1 , · · · , R K ). C. Existing Results The capacity of 1-to-2 broadcast PECs with COF has been characterized in : Theorem 1 (Theorem 3 in ): The capacity region (R 1 , R 2 ) of a 1-to-2 broadcast PEC with COF is described by One scheme that achieves the above capacity region in (2) is the 2-phase approach in . That is, for any (R 1 , R 2 ) in the interior of (2), perform the following coding operations. In Phase 1, the source s sends out uncoded information packets X 1,j1 and X 2,j2 for all j 1 ∈ and j 2 ∈ until each packet is received by at least one receiver. 
Those X 1,j1 packets that are received by d 1 have already reached their intended receiver and thus will not be retransmitted in the second phase. Those X 1,j1 packets that are received by d 2 (a) Sending the first Phase-2 packet . (b) The optimal coding operation after sending the . but not by d 1 need to be retransmitted in the second phase, and are thus stored in a separate queue Q 1;21 . Symmetrically, the X 2,j2 packets that are received by d 1 but not by d 2 need to be retransmitted, and are stored in another queue Q 2;12 . Since those "overheard" packets in queues Q 1;21 and Q 2;12 are perfect candidates for intersession network coding , they can be linearly mixed together in Phase 2. Each single coded packet in Phase 2 can now serve both d 1 and d 2 simultaneously. The intersession network coding gain in Phase 2 allows us to achieve the capacity region in (2). Based on the same logic, derives an achievability region for 1-to-K broadcast PECs with COF under a perfectly symmetric setting. The main idea can be viewed as an extension of the above 2-phase approach. That is, for Phase 1, the source s sends out all X k,j , ∀k ∈ , j ∈ , until each of them is received by at least one of the receivers {d k : k ∈ }. Those X k,j packets that are received by d k have already reached their intended destination and will not be transmitted in Phase 2. Those X k,j packets that are received by some other d i but not by d k are the "overheard packets," and could potentially be mixed with packets of the i-th session. In Phase 2, source s takes advantage of all the coding opportunities created in Phase 1 and mixes the packets of different sessions to capitalize the network coding gain. implements such 2phase approach while taking into account of various practical considerations, such as time-out and network synchronization. D. The Suboptimality of The 2-Phase Approach Although being throughput optimal for the simplest K = 2 case, the above 2-phase approach does not achieve the capacity for the cases in which K > 2. To illustrate this point, consider the example in Fig. 2. In Fig. 2(a), source s would like to serve three receivers d 1 to d 3 . Each (s, d k ) session contains a single information packet X k , and the goal is to convey each X k to the intended receiver d k for all k = 1, 2, 3. Suppose the 2-phase approach in Section II-C is used. During Phase 1, each packet is sent repeatedly until it is received by at least one receiver, which either conveys the packet to the intended receiver or creates an overheard packet that can be used in Phase 2. Suppose after Phase 1, d 1 has received X 2 and X 3 , d 2 has received X 1 and X 3 , and d 3 has not received any packet ( Fig. 2(a)). Since each packet has reached at least one receiver, source s moves to Phase 2. One can easily check that if s sends out a coded packet in Phase 2, such packet can serve both d 1 and d 2 . That is, d 1 (resp. d 2 ) can decode X 1 (resp. X 2 ) by subtracting X 2 (resp. X 1 ) from . Nonetheless, since the broadcast PEC is random, the coded packet may or may not reach d 1 or d 2 . Suppose that due to random channel realization, reaches only d 3 , see Fig. 2(a). The remaining question is what s should send for the next time slot. For the following, we compare the existing 2-phase approach and a new optimal decision. The existing 2-phase approach: We first note that since d 3 received neither X 1 nor X 2 in the past, the newly received cannot be used by d 3 to decode any information packet. 
In the existing results , , , d 3 thus discards the overheard , and s would continue sending for the next time slot in order to capitalize this coding opportunity created in Phase 1. The optimal decision: It turns out that the broadcast system can actually benefit from the fact that d 3 overhears the coded packet even though neither X 1 nor X 2 can be decoded by d 3 . More explicitly, instead of sending , s should send a new packet that mixes all three sessions together. With the new (see Fig. 2(b) for illustration), d 1 can decode the desired X 1 by subtracting both X 2 and X 3 from . d 2 can decode the desired X 2 by subtracting both X 1 and X 3 from . For d 3 , even though d 3 does not know the values of X 1 and X 2 , d 3 can still use the previously overheard packet to subtract the interference (X 1 + X 2 ) from and decode its desired packet X 3 . As a result, the new coded packet serves all destinations d 1 , d 2 , and d 3 , simultaneously. This new coding decision thus strictly outperforms the existing 2-phase approach. Two critical observations can be made for this example. First of all, when d 3 overhears a coded packet, even though d 3 can decode neither X 1 nor X 2 , such new side information can still be used for future decoding. More explicitly, as long as s sends packets that are of the form α(X 1 + X 2 ) + βX 3 , the "aligned interference" α(X 1 + X 2 ) can be completely removed by d 3 without decoding individual X 1 and X 2 . This technique is thus termed "code alignment," which is in parallel with the interference alignment method used in Gaussian interference channels . Second of all, in the existing 2-phase approach, Phase 1 has the dual roles of sending uncoded packets to their intended receivers, and, at the same time, creating new coding opportunities (the overheard packets) for Phase 2. It turns out that this dual-purpose Phase-1 operation is indeed optimal (as will be seen in Sections IV and V). The suboptimality of the 2-phase approach for K > 2 is actually caused by the Phase-2 operation, in which source s only capitalizes the coding opportunities created in Phase 1 but does not create any new coding opportunities for subsequent packet mixing. One can thus envision that for the cases K > 2, an optimal policy should be a multi-phase policy, say an Mphase policy, such that for all i ∈ (not only for the first phase) the packets sent in the i-th phase have dual roles of sending the information packets to their intended receivers and simultaneously creating new coding opportunities for the subsequent Phases (i + 1) to M . These two observations will be the building blocks of our achievability results. III. THE MAIN RESULTS We have two groups of results: one is for general 1-to-K broadcast PECs with arbitrary values of the PEC parameters, and the other is for 1-to-K broadcast PECs with some restrictive conditions on the values of the PEC parameters. A. Capacity Results For General 1-to-K Broadcast PECs We define any bijective function π : → as a K-permutation and we sometimes just say that π is a permutation whenever it is clear from the context that we are focusing on . There are totally K! distinct K-permutations. Given any K-permutation π, for all j ∈ we define S π j ∆ = {π(l) : ∀l ∈ } as the set of the first j elements according to the permutation π. We then have the following capacity outer bound for any 1-to-K broadcast PEC with COF. Proposition 1: Recall the definition of p ∪S in (1). Any achievable rates (R 1 , · · · , R K ) must satisfy the following K! 
inequalities: Proof: Proposition 1 can be proven by a simple extension of the outer bound arguments used in , . (Note that when K = 2, Proposition 1 collapses to Theorem 3 of .) For any given permutation π, consider a new broadcast channel with (K − 1) artificially created information pipes connecting all the receivers d 1 to d K . More explicitly, for all j ∈ , create an auxiliary pipe from d π(j) to d π(j+1) . See Fig. 3 for illustration. With the auxiliary pipes, any destination d π(j) , j ∈ , not only observes the corresponding output Z π(j) of the broadcast PEC but also has all the information Z π(l) of its "upstream receivers" d π(l) for all l ∈ . Since we only create new pipes, any achievable rates of the original 1-to-K broadcast PEC with COF must also be achievable in the new 1-to-K broadcast PEC with COF in Fig. 3. The capacity of the new 1-to-K broadcast PEC with COF is thus an outer bound on the capacity of the original 1-to-K broadcast PEC with COF. On the other hand, the new 1-to-K broadcast PEC in Fig. 3 is a physically degraded broadcast channel with the new success probability of d k being p ∪S π k instead of p π(k) (see Fig. 3). shows that COF does not increase the capacity of any physically degraded broadcast channel. Therefore the capacity of the new 1-to-K broadcast PEC with COF is identical to the capacity of the new 1-to-K broadcast PEC without COF. Since (3) is the capacity of the new 1-to-K broadcast PEC without COF, (3) must be an outer bound of the capacity of the original 1-to-K PEC with COF. By considering different permutation π, the proof of Proposition 1 is complete. For the following, we first provide the capacity results for general 1-to-3 broadcast PECs. We then state an achievability inner bound for general 1-to-K broadcast PECs with COF for arbitrary K values, which, together with the outer bound in Proposition 1 can effectively bracket the capacities for the cases in which K ≥ 4. To state the capacity inner bound, we need to define an additional function: f p (ST ), which takes an input ST of two disjoint sets S, T ∈ 2 . More explicitly, we define f p (ST ) as the probability that a packet Y , transmitted through the 1-to-K PEC, is received by all those d i with i ∈ S and not received by any d j with j ∈ T . For example, f p (S \S) = p S \S for all S ∈ 2 . For arbitrary disjoint S and T , we thus have We also say that a strict total ordering "≺" on For example, for K = 3, the following strict total ordering is cardinality-compatible. Proposition 3: Fix any arbitrary cardinality-compatible, strict total ordering ≺. For any general 1-to-K broadcast PEC with COF, a rate vector (R 1 , · · · , R K ) can be achieved by a linear network code if there exist 2 K non-negative x variables, indexed by S ∈ 2 : and K3 K−1 non-negative w variables, indexed by (k; S → T ) satisfying T ⊆ S ⊆ ( \k): such that jointly the following linear inequalities 2 are satisfied: Since Proposition 3 holds for any cardinality-compatible, strict total ordering ≺. We can easily derive the following corollary: To distinguish different strict total orderings, we append a subscript l to ≺. For example, ≺ 1 and ≺ 2 correspond to two distinct strict total orderings. Overall, there are L ∆ = K k=0 K k ! distinct strict total ordering ≺ l , ∀l ∈ , that are cardinality-compatible. Corollary 1: For any given cardinality-compatible strict total ordering ≺ l , we use Λ l to denote the collection of all (R 1 , · · · , R K ) rate vectors satisfying Proposition 3. 
Then the convex hull of Co ({Λ l : ∀l ∈ }) is an achievable region of the given 1-to-K broadcast PEC with COF. Remark: For some general classes of PEC parameters, one can prove that the inner bound of Proposition 3 is indeed the capacity region for arbitrary K ≥ 4 values. Two such classes are discussed in the next subsection. B. Capacity Results For Two Classes of 1-to-K Broadcast PECs We first provide the capacity results for symmetric broadcast PECs. Definition 3: That is, the success probability p S \S depends only on |S|, the size of S, and does not depend on which subset of receivers being considered. Proposition 4: For any symmetric 1-to-K broadcast PEC with COF, the capacity outer bound in Proposition 1 is indeed the corresponding capacity region. The perfect channel symmetry condition in Proposition 4 may be a bit restrictive for real environments as most broadcast channels are non-symmetric. A more realistic setting is to allow channel asymmetry while assuming spatial independence between different destinations d i . Definition 4: A 1-to-K broadcast PEC is spatially independent if the channel parameters p S \S : ∀S ∈ 2 satisfy where p k is the marginal success probability of destination d k . Note: A symmetric 1-to-K broadcast PEC needs not be spatially independent. A spatially independent PEC is symmetric if p 1 = p 2 = · · · = p K . To describe the capacity results for spatially independent 1-to-K PECs, we need the following additional definition. Definition 5: Consider a 1-to-K broadcast PEC with marginal success probabilities p 1 to p K . Without loss of generality, assume p 1 ≤ p 2 ≤ · · · ≤ p K , which can be achieved by relabeling. We say a rate vector (R 1 , · · · , R K ) is one-sidedly fair if We use Λ osf to denote the collection of all one-sidedly fair rate vectors. The one-sided fairness contains many practical scenarios of interest. For example, the perfectly fair rate vector (R, R, · · · , R) by definition is also one-sidedly fair. Another example is when min(p 1 , · · · , p K ) > 1 2 and we allow the rate R k to be proportional to the corresponding marginal success probability p k , i.e., R k = p k R, then the rate vector (p 1 R, p 2 R, · · · , p K R) is also one-sidedly fair. For the following, we provide the capacity of spatially independent 1-to-K PECs with COF under the condition of one-sided fairness. Proposition 5: Suppose the 1-to-K PEC of interest is spatially independent with marginal success probabilities 0 < p 1 ≤ p 2 ≤ · · · ≤ p K . Any one-sidedly fair rate vector (R 1 , · · · , R K ) ∈ Λ osf is in the capacity region if and only if (R 1 , · · · , R K ) ∈ Λ osf satisfies Proposition 5 implies that Proposition 1 is indeed the capacity region when focusing on the one-sidedly fair rate region Λ osf . IV. THE PACKET EVOLUTION SCHEMES For the following, we describe a new class of coding schemes, termed the packet evolution (PE) scheme, which embodies the concept of code alignment and achieves (near) optimal throughput. The PE scheme is the building block of the capacity / achievability results in Section III. A. Description Of The Packet Evolution Scheme The packet evolution scheme is described as follows. Recall that each (s, d k ) session has nR k information packets X k,1 to X k,nR k . We associate each of the K k=1 nR k information packets with an intersession coding vector v and a set S ⊆ . An intersession coding vector is a K k=1 nR kdimensional row vector with each coordinate being a scalar in GF(q). 
Before the start of the broadcast, for any k ∈ {1, · · · , K} and j ∈ {1, · · · , nR k } we initialize the corresponding vector v of X k,j in such a way that the only non-zero coordinate of v is the coordinate corresponding to X k,j and all other coordinates are zero. Without loss of generality, we set the value of the only non-zero coordinate to one. That is, initially the coding vectors v are set to the elementary basis vectors of the entire (Σ K k=1 nR k )-dimensional message space. For any k ∈ {1, · · · , K} and j ∈ {1, · · · , nR k } the set S of X k,j is initialized to ∅. As will become clear shortly, we call S the overhearing set 3 of the packet X k,j . For easier reference, we use v(X k,j ) and S(X k,j ) to denote the intersession coding vector and the overhearing set of X k,j . Throughout the n broadcast time slots, source s constantly updates the S(X k,j ) and v(X k,j ) according to the COF. The main structure of a packet evolution scheme can now be described as follows.
§ THE PACKET EVOLUTION SCHEME
1: Source s maintains a single flag f change . Initially, set f change ← 1.
2: for t = 1, · · · , n, do
3: In the beginning of the t-th time slot, do Lines 4 to 10.
4: if f change = 1 then
5: Choose a new non-empty subset T ⊆ {1, · · · , K}.
6: Run a subroutine PACKET SELECTION, which takes T as input and outputs a collection of |T | packets {X k,j k : ∀k ∈ T }, termed the target packets, for which all X k,j k satisfy (S(X k,j k ) ∪ {k}) ⊇ T .
7: Generate |T | uniformly random coefficients c k ∈ GF(q) for all k ∈ T and construct an intersession coding vector v tx ← Σ k∈T c k · v(X k,j k ).
8: Set f change ← 0.
9: end if
10: Send out a linearly intersession coded packet according to the coding vector v tx . That is, we send Y tx = v tx · (X 1,1 , · · · , X K,nRK ) T , where (X 1,1 , · · · , X K,nRK ) T is a column vector consisting of all information symbols. 4
11: In the end of the t-th time slot, use a subroutine UPDATE to revise the v(X k,j k ) and S(X k,j k ) values of all target packets X k,j k based on the COF.
12: if the S(X k,j k ) value changes for at least one target packet X k,j k after the UPDATE then
13: Set f change ← 1.
14: end if
15: end for
In summary, a group of target packets {X k,j k } is selected according to the choice of the subset T . The corresponding vectors {v(X k,j k )} are used to construct a coding vector v tx . The same coded packet Y tx , corresponding to v tx , is then sent repeatedly for many time slots until one of the target packets X k,j k evolves (i.e., until the corresponding S(X k,j k ) changes). Then a new subset T is chosen and the process is repeated until we use up all n time slots. Three subroutines are used as the building blocks of a packet evolution method: (i) how to choose the non-empty T ⊆ {1, · · · , K}; (ii) for each k ∈ T , how to select a single target packet X k,j k among all X k,j satisfying (S(X k,j ) ∪ {k}) ⊇ T ; and (iii) how to update the coding vectors v(X k,j k ) and the overhearing sets S(X k,j k ). For the following, we first describe the detailed update rules.
§ UPDATE OF S(X k,j k ) AND v(X k,j k )
1: Input: The T and v tx used for transmission in the current time slot; and S rx , the set of destinations d i that receive the transmitted coded packet in the current time slot. (S rx is obtained through the COF in the end of the current time slot.)
4 It is critical to note that the coding operation is based purely on v tx rather than on the list of the target packets X k,j k . Once v tx is decided, we create a new coded packet based on the coordinates of v tx . It is possible that v tx has non-zero coordinates corresponding to some X k′,j that are not one of the target packets X k,j k . Those X k′,j will participate in creating the coded packet.
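To make the control flow of the main loop concrete, the following Python sketch mirrors the pseudo-code above. It is only an illustration of the structure, not the paper's implementation: the helpers choose_T, packet_selection, update, and broadcast are hypothetical stand-ins for the subroutines described in the text (broadcast returns S rx via the COF), and the finite-field arithmetic is abstracted away.
§ ILLUSTRATIVE PYTHON SKETCH OF THE MAIN PE LOOP
import random

def combine(pairs):
    # Coefficient-weighted sum of coding vectors; field arithmetic is elided in this sketch.
    length = max(len(v) for v, _ in pairs)
    out = [0] * length
    for v, c in pairs:
        for i, val in enumerate(v):
            out[i] += c * val  # a real implementation works over GF(q)
    return out

def pe_scheme_skeleton(n, packets, choose_T, packet_selection, update, broadcast, q=2**8):
    # packets[(k, j)] holds 'v' (coding vector) and 'S' (overhearing set) of X_{k,j}.
    # choose_T, packet_selection, update, broadcast are hypothetical callbacks standing in
    # for the subroutines of the text; broadcast(v_tx) returns S_rx obtained through COF.
    f_change = True                                   # Line 1
    T, targets, v_tx = None, {}, None
    for t in range(1, n + 1):                         # Line 2
        if f_change:                                  # Line 4
            T = choose_T(packets)                     # Line 5: pick a new non-empty subset T
            targets = packet_selection(T, packets)    # Line 6
            coeffs = {k: random.randrange(1, q) for k in T}   # Line 7
            v_tx = combine([(packets[targets[k]]['v'], coeffs[k]) for k in T])
            f_change = False                          # Line 8
        S_rx = broadcast(v_tx)                        # Line 10: transmit and collect COF
        changed = update(targets, T, v_tx, S_rx, packets)     # Line 11
        if changed:                                   # Lines 12-14
            f_change = True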
Then, after UPDATE, the summary of the packets evolves accordingly. From the above step-by-step illustration, we see that the optimal coding policy in Section II-D is a special case of a packet evolution scheme. B. Properties of A Packet Evolution Scheme We term the packet evolution (PE) scheme in Section IV-A a generic PE method since it does not specify how to choose T and the target packets X k,j k and only requires the output of PACKET SELECTION to satisfy (S(X k,j k ) ∪ {k}) ⊇ T, ∀k ∈ T . In this subsection, we state some key properties of any generic PE scheme. The intuition of the PE scheme is based on these key properties and will be discussed further in Section IV-C. We first define the following notation for any linear network code. (Note that the PE scheme is a linear network code.) Definition 6: Consider any linear network code. For any destination d k , each received packet Z k (t) can be represented by a vector w k (t), which is a (Σ K k=1 nR k )-dimensional vector containing the coefficients used to generate Z k (t). That is, Z k (t) = w k (t) · (X 1,1 , · · · , X K,nRK ) T . If Z k (t) is an erasure, we simply set w k (t) to be an all-zero vector. The knowledge space of destination d k in the end of time t is denoted by Ω Z,k (t), which is the linear span of {w k (τ ) : ∀τ ≤ t}. Definition 7: For any non-coded information packet X k,j , the corresponding intersession coding vector is a (Σ K k=1 nR k )-dimensional vector with a single one in the corresponding coordinate and all other coordinates being zero. We use δ k,j to denote such a delta vector. The message space of d k is then defined as Ω M,k = span(δ k,j : ∀j ∈ {1, · · · , nR k }). With the above definitions, we have the following straightforward lemma: Lemma 1: In the end of time t, destination d k is able to decode all the desired information packets X k,j , ∀j ∈ {1, · · · , nR k }, if and only if Ω M,k ⊆ Ω Z,k (t). We now define "non-interfering vectors" from the perspective of a destination d k . Definition 8: In the end of time t (or in the beginning of time (t + 1)), a vector v (and thus the corresponding coded packet) is "non-interfering" from the perspective of d k if v ∈ span(Ω Z,k (t), Ω M,k ). We note that any non-interfering vector v can always be expressed as the sum of two vectors v′ and w, where v′ ∈ Ω M,k is a linear combination of the information vectors of d k and w ∈ Ω Z,k (t) is a linear combination of the packets received by d k . If v′ = 0, then v = w is a transparent packet from d k 's perspective since d k can compute the value of w · (X 1,1 , · · · , X K,nRK ) T from its current knowledge space Ω Z,k (t). If v′ ≠ 0, then v = v′ + w can be viewed as a pure information packet v′ ∈ Ω M,k after subtracting the unwanted w vector. In either case, v does not interfere with the transmission of the (s, d k ) session, which gives rise to the name "non-interfering vectors." The following Lemmas 2 and 3 discuss the time dynamics of the PE scheme. To distinguish different time instants, we add a time subscript and use S t−1 (X k,j k ) and S t (X k,j k ) to denote the overhearing set of X k,j k in the end of time (t − 1) and t, respectively. Similarly, v t−1 (X k,j k ) and v t (X k,j k ) denote the coding vectors in the end of time (t − 1) and t, respectively.
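As a concrete illustration of Definitions 6-7 and Lemma 1, the following Python sketch checks the subspace condition Ω M,k ⊆ Ω Z,k (t) by comparing ranks over a finite field. It assumes a prime field size q purely for simplicity (the scheme itself only requires a sufficiently large GF(q)), and the function names are illustrative rather than part of the scheme.
§ ILLUSTRATIVE PYTHON SKETCH OF THE DECODABILITY CHECK IN LEMMA 1
import numpy as np

def rank_gf(rows, q):
    # Rank of a list of integer row vectors over GF(q), q prime, by Gaussian elimination.
    if len(rows) == 0:
        return 0
    A = np.array(rows, dtype=np.int64) % q
    m, n_cols = A.shape
    r = 0
    for c in range(n_cols):
        pivot = next((i for i in range(r, m) if A[i, c] != 0), None)
        if pivot is None:
            continue
        A[[r, pivot]] = A[[pivot, r]]
        A[r] = (A[r] * pow(int(A[r, c]), q - 2, q)) % q   # scale pivot row to 1
        for i in range(m):
            if i != r and A[i, c] != 0:
                A[i] = (A[i] - A[i, c] * A[r]) % q        # eliminate column c
        r += 1
        if r == m:
            break
    return r

def can_decode(knowledge_vectors, delta_vectors, q):
    # Lemma 1: d_k can decode all X_{k,j} iff Omega_{M,k} lies inside Omega_{Z,k}(t),
    # i.e., appending the delta vectors does not increase the rank of the knowledge space.
    Z = list(knowledge_vectors)
    return rank_gf(Z, q) == rank_gf(Z + list(delta_vectors), q)

# Toy check with q = 5: one received combination cannot span two delta vectors.
print(can_decode([[1, 1, 0]], [[1, 0, 0], [0, 1, 0]], q=5))   # False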
Lemma 3: In the end of the t-th time slot, we use Ω R,k (t) to denote the remaining space of the PE scheme, namely the linear span of the coding vectors v t (X k,j ) of those packets X k,j that have not yet been heard by d k , i.e., k ∉ S t (X k,j ). For any n and any ǫ > 0, there exists a sufficiently large finite field GF(q) such that for all k ∈ {1, · · · , K} and t ∈ {1, · · · , n}, the message space satisfies Ω M,k ⊆ span(Ω Z,k (t), Ω R,k (t)) with probability at least 1 − ǫ. Intuitively, Lemma 3 says that if in the end of time t we directly transmit all the remaining coded packets {v t (X k,j ) : ∀j ∈ {1, · · · , nR k }, k ∉ S t (X k,j )} from s to d k through a noise-free information pipe, then with high probability d k can successfully decode all the desired information packets X k,1 to X k,nR k (see Lemma 1) from the knowledge space Ω Z,k (t) and the new information of the remaining space Ω R,k (t). Lemma 3 directly implies the following corollary. Corollary 2: For any n and any ǫ > 0, there exists a sufficiently large finite field GF(q) such that the following statement holds. If in the end of the n-th time slot all information packets X k,j have S n (X k,j ) ∋ k, then with probability at least 1 − ǫ every destination d k can decode all of its desired packets X k,1 to X k,nR k . Proof: If in the end of the n-th time slot all X k,j have S n (X k,j ) ∋ k, then the corresponding Ω R,k (n) = {0} contains only the origin for all k ∈ {1, · · · , K}. Therefore, Corollary 2 is simply a restatement of Lemmas 1 and 3. To illustrate Corollary 2, consider our 5-time-slot example. In the end of Slot 5, since k ∈ S(X k ) for all k ∈ {1, 2, 3}, Corollary 2 guarantees that with high probability all d k can decode the desired X k , which was first observed in the example of Section II-D. The proofs of Lemmas 2 and 3 are relegated to Appendices A and B, respectively. C. The Intuitions Of The Packet Evolution Scheme Lemmas 2 and 3 are the key properties of a PE scheme. In this subsection, we discuss the corresponding intuitions. Receiving the information packet X k,j : Each information packet keeps a coding vector v(X k,j ). Whenever we would like to communicate X k,j to destination d k , instead of sending a non-coded packet X k,j directly, we send an intersession coded packet according to the coding vector v(X k,j ). Lemma 3 shows that if we send all the coded vectors v(X k,j ) that have not been heard by d k (those with k ∉ S(X k,j )) through a noise-free information pipe, then d k can indeed decode all the desired packets X k,j with close-to-one probability. It also implies, although in an implicit way, that once a v(X k,j0 ) is heard by d k for some j 0 (so that k ∈ S(X k,j0 )), there is no need to transmit this particular v(X k,j0 ) in later time slots. Jointly, these two implications show that we can indeed use the coded packet v(X k,j ) as a substitute for X k,j without losing any information. In the broadest sense, we can say that d k receives a packet X k,j if the corresponding v(X k,j ) successfully arrives at d k in some time slot t. For each X k,j , the set S(X k,j ) serves two purposes: (i) keep track of whether its intended destination d k has received this X k,j (through the v(X k,j )), and (ii) keep track of whether v(X k,j ) is non-interfering to the other destinations d i , i ≠ k. We discuss these two purposes separately. Tracking the reception of the intended d k : We first note that in the end of time 0, d k has not received any packet and we indeed have k ∉ S(X k,j ) = ∅. We then notice that for any given X k,j , the set S(X k,j ) evolves over time. By Line 4 of the UPDATE, we can prove that as time proceeds, the first time t 0 such that k ∈ S(X k,j ) must be the first time when X k,j is received by d k (i.e., X k,j is chosen in the beginning of time t 0 and k ∈ S rx in the end of time t 0 ).
One can also show that for any X k,j , once k ∈ S t0 (X k,j ) in the end of some time t 0 , we will have k ∈ S t (X k,j ) for all t ≥ t 0 . By the above reasoning, checking whether k ∈ S(X k,j ) indeed tells us whether the intended receiver d k has received X k,j . Tracking the non-interference from the perspective of d i ≠ d k : Lemma 2 also ensures that v(X k,j ) is non-interfering from d i 's perspective for any i ∈ S(X k,j ), i ≠ k. Therefore S(X k,j ) successfully tracks whether v(X k,j ) is non-interfering from the perspectives of the d i , i ≠ k. Serving multiple destinations simultaneously by mixing non-interfering packets: The above discussion ensures that when we would like to send an information packet X k,j k to d k , we can send a coded packet v(X k,j k ) as an information-lossless substitute. On the other hand, Lemma 2 ensures that such a coded packet is non-interfering from the perspective of every d i with i ∈ S(X k,j k ). Therefore, instead of sending a single packet v(X k,j k ), it is beneficial to combine the transmission of two packets v(X k,j k ) and v(X l,j l ), as long as l ∈ S(X k,j k ) and k ∈ S(X l,j l ). More explicitly, suppose we simply add the two coded packets together and transmit their sum. Since the component intended for the other session is non-interfering, d k effectively receives v(X k,j k ) without any interference, and symmetrically for d l . By generalizing this idea, a PE scheme first selects a subset T ⊆ {1, · · · , K} and then chooses, for each k ∈ T , a target packet X k,j k such that v(X k,j k ) is non-interfering from d l 's perspective for all l ∈ T \k (see Line 6 of the PE scheme). This ensures that the coded packet v tx in Line 7 of the PE scheme can serve all destinations k ∈ T simultaneously. Creating new coding opportunities while exploiting the existing coding opportunities: As discussed in the example of Section II-D, the suboptimality of the existing 2-phase approach for K ≥ 3 destinations is due to the fact that it fails to create new coding opportunities while exploiting old coding opportunities. The PE scheme was designed to solve this problem. More explicitly, for each X k,j the vector v(X k,j ) is non-interfering for all d i satisfying i ∈ (S(X k,j ) ∪ {k}). Therefore, the larger the set S(X k,j ), the larger the number of sessions that can be coded together with v(X k,j ). To create more coding opportunities, we thus need to be able to enlarge the S(X k,j ) set over time. Let us assume that the PACKET SELECTION in Line 6 chooses the X k,j such that S(X k,j ) = T \k. That is, we choose the X k,j that can be mixed with those (s, d l ) sessions with l ∈ S(X k,j ) ∪ {k} = T . Then Line 4 of the UPDATE guarantees that if some other d i , i ∉ T , overhears the coded transmission, we can update S(X k,j ) with a strictly larger set S(X k,j ) ∪ S rx . Therefore, a new coding opportunity is created since we can now mix more sessions together with X k,j . Note that the coding vector v(X k,j ) is also updated accordingly. The new v(X k,j ) represents the necessary "code alignment" needed to utilize this newly created coding opportunity. The (near-)optimality of the PE scheme is rooted deeply in the concept of code alignment, which aligns the "non-interfering subspaces" through the joint use of S(X k,j ) and v(X k,j ).
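As a toy illustration of the packet-mixing step just discussed, the following Python sketch works over GF(2), so that "adding two coded packets" is a bitwise XOR; the payloads are made up for the example and carry no meaning beyond it.
§ ILLUSTRATIVE PYTHON SKETCH OF MIXING TWO NON-INTERFERING PACKETS
def xor_bytes(a, b):
    # Bitwise XOR of two equal-length byte strings (addition over GF(2)).
    return bytes(x ^ y for x, y in zip(a, b))

# Hypothetical payloads: X_k is desired by d_k and X_l is desired by d_l.
X_k = b"packet-for-dk!!!"
X_l = b"packet-for-dl***"

# Suppose l is in S(X_k) and k is in S(X_l): d_k has already overheard X_l and vice versa.
coded = xor_bytes(X_k, X_l)                 # one transmitted packet serves both destinations

recovered_by_dk = xor_bytes(coded, X_l)     # d_k removes the part it already knows
recovered_by_dl = xor_bytes(coded, X_k)     # d_l removes the part it already knows
assert recovered_by_dk == X_k and recovered_by_dl == X_l
In the general scheme the same cancellation happens over GF(q), and the "already known" part is exactly the non-interfering component of Definition 8.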
V. QUANTIFYING THE ACHIEVABLE RATES OF PE SCHEMES In this section, we describe how to use the PE schemes to attain the capacity of 1-to-3 broadcast PECs with COF (Proposition 2), the achievability results for general 1-to-K broadcast PECs with COF (Proposition 3), and the capacity results for symmetric broadcast PECs (Proposition 4) and for spatially independent PECs with one-sided fairness constraints (Proposition 5). We first describe a detailed construction of a capacity-achieving PE scheme for general 1-to-3 broadcast PECs with COF in Section V-A and then discuss the corresponding high-level intuition in Section V-B. The high-level discussion will later be used to prove the achievability results for general 1-to-K broadcast PECs with COF in Section V-C. The proofs of the capacity results of the two special classes of PECs are provided in Section V-D. A. Achieving the Capacity of 1-to-3 Broadcast PECs With COF - Detailed Construction Consider a 1-to-3 broadcast PEC with arbitrary channel parameters {p S{1,2,3}\S }. Without loss of generality, assume that the marginal success probability p k > 0 for k = 1, 2, 3. For the cases in which p k = 0 for some k, such a d k cannot receive any packet, and the 1-to-3 broadcast PEC thus collapses to a 1-to-2 broadcast PEC, the capacity of which is already known. Given any arbitrary rate vector (R 1 , R 2 , R 3 ) that is in the interior of the capacity outer bound of Proposition 1, our goal is to design a PE scheme for which each d k can successfully decode its desired packets {X k,j : ∀j ∈ {1, · · · , nR k }}, for k ∈ {1, 2, 3}, after n usages of the broadcast PEC. Before describing such a PE scheme, we introduce a new definition and the corresponding lemma. Given a rate vector (R 1 , R 2 , R 3 ) and the PEC channel parameters {p S{1,2,3}\S }, we say that destination d i dominates another destination d j if the corresponding dominance inequality, referred to as (14) in the sequel, holds. Lemma 4 states that the dominance relation among d 1 , d 2 , and d 3 can always be made transitive. Proof: Suppose this lemma is not true and we have d i dominating d k , d k dominating d l , and d l dominating d i . By definition, we must have the three inequalities (15), (16), and (17). We then notice that the product of the left-hand sides of (15), (16), and (17) equals the product of their right-hand sides. As a result, all three inequalities (15), (16), and (17) must also be equalities. Since (17) is an equality, we can also say that d i dominates d l . The proof of Lemma 4 is complete. By Lemma 4, we can assume that d 1 dominates d 2 , d 2 dominates d 3 , and d 1 dominates d 3 , which can be achieved by relabeling the destinations d k . We then describe a detailed capacity-achieving PE scheme, which has four major phases. The dominance relationship is a critical part of the proposed PE scheme. The high-level discussion of this capacity-achieving PE scheme will be provided in Section V-B. Phase 1 contains three sub-phases. In Phase 1.1, we always choose T = {1} for the PE scheme. In the beginning of time 1, we first select X 1,1 . We keep transmitting the uncoded packet according to v(X 1,1 ) = δ 1,1 until it is received by at least one of the three destinations {d 1 , d 2 , d 3 }. Update its S(X 1,1 ) and v(X 1,1 ) according to the UPDATE rule. Then we move to packet X 1,2 . Keep transmitting the uncoded packet according to v(X 1,2 ) = δ 1,2 until it is received by at least one of the three receivers {d 1 , d 2 , d 3 }. Update its S(X 1,2 ) and v(X 1,2 ) according to the UPDATE rule. Repeat this process until every X 1,j , j ∈ {1, · · · , nR 1 }, is received by at least one receiver. By the law of large numbers, Phase 1.1 will continue for approximately nR 1 /p ∪{1,2,3} time slots. In the beginning of Phase 1.2, we first select X 2,1 . We keep transmitting the uncoded packet according to v(X 2,1 ) = δ 2,1 until it is received by at least one of the three destinations {d 1 , d 2 , d 3 }. Update its S(X 2,1 ) and v(X 2,1 ). Repeat this process until every X 2,j , j ∈ {1, · · · , nR 2 }, is received by at least one receiver. By the law of large numbers, Phase 1.2 will continue for approximately nR 2 /p ∪{1,2,3} time slots, and Phase 1.3 repeats the same process for the X 3,j packets. After Phase 1, the X 2,j packets with S(X 2,j ) = {3}, i.e., those overheard only by d 3 , are termed the queue Q 2;31 packets, and the X 3,j packets with S(X 3,j ) = {2}, i.e., those overheard only by d 2 , are termed the queue Q 3;21 packets. In Phase 2.1, we choose T = {2, 3}.
We order all the Q 2;31 packets in an arbitrary sequence and all the Q 3;21 packets in an arbitrary sequence. In the beginning of Phase 2.1, we first select the head-of-line X 2,j2 and the head-of-line X 3,j3 from these two queues Q 2;31 and Q 3;21 , respectively. Since 3 ∈ S(X 2,j2 ) and 2 ∈ S(X 3,j3 ), these two packets can be linearly combined together. Let v tx denote the overall coding vector generated from these two packets (see Line 7 of the main PE scheme). As discussed in Line 10 of the main PE scheme, we keep transmitting the same coded packet v tx until at least one of the two packets X 2,j2 and X 3,j3 has a new overhearing set, i.e., a new S(X 2,j2 ) or a new S(X 3,j3 ). In the end, we thus have three subcases: (i) only X 2,j2 has a new S(X 2,j2 ); (ii) only X 3,j3 has a new S(X 3,j3 ); and (iii) both X 2,j2 and X 3,j3 have new overhearing sets. In Case (i), we keep the same T = {2, 3} and the same X 3,j3 but switch to the next-in-line Q 2;31 packet X 2,j′2 . The new X 2,j′2 will then be used, together with the existing X 3,j3 , to generate a new v tx in Line 7 of the main PE scheme for the next time slot(s). In Case (ii), we keep the same T = {2, 3} and the same X 2,j2 but switch to the next-in-line Q 3;21 packet X 3,j′3 . The new X 3,j′3 will then be used, together with the existing X 2,j2 , to generate a new v tx in Line 7 of the main PE scheme for the next time slot(s). In Case (iii), we keep the same T = {2, 3} and switch to the next-in-line packets X 2,j′2 and X 3,j′3 . The new pair X 2,j′2 and X 3,j′3 will then be used to generate a new v tx in Line 7 of the main PE scheme for the next time slot(s). We repeat the above process until we have used up all Q 3;21 packets X 3,j . Remark 1: One critical observation of the PE scheme is that when two packets X 2,j2 and X 3,j3 are mixed together to generate v tx , each packet still keeps its own identity X 2,j2 and X 3,j3 , its own associated sets S(X 2,j2 ) and S(X 3,j3 ), and its own coding vectors v(X 2,j2 ) and v(X 3,j3 ). Even the decision whether to update S(X) or v(X) is made separately (Line 2 of the UPDATE) for each of the two packets X 2,j2 and X 3,j3 . Therefore, it is as if the two packets X 2,j2 and X 3,j3 share a single time slot in a non-interfering way (like carpooling together). Following this observation, in Phase 2.1, the decision whether to switch the current X 2,j2 to the next-in-line Q 2;31 packet X 2,j′2 is also completely independent of the decision whether to switch the current X 3,j3 to the next-in-line Q 3;21 packet X 3,j′3 . Remark 2: We first take a closer look at when a Q 3;21 packet X 3,j3 will be switched to the next-in-line packet X 3,j′3 . By Line 4 of the UPDATE, we switch to the next-in-line X 3,j′3 if and only if one of {d 1 , d 3 } has received the current packet v tx , in which X 3,j3 participates. Therefore, on average each X 3,j3 stays in Phase 2.1 for 1/p ∪{1,3} time slots, and it thus takes the number of time slots given in (21) to completely finish the Q 3;21 packets. By similar arguments, it takes the number of time slots given in (22) to completely use up the Q 2;31 packets. Since we assume that d 2 dominates d 3 , the dominance inequality in (14) implies that (22) is no smaller than (21). Therefore we can indeed finish the Q 3;21 packets before exhausting the Q 2;31 packets. In Phase 2.2, we next choose T = {1, 3} and work with the queues Q 1;32 and Q 3;12 defined analogously. We order all the Q 1;32 packets in an arbitrary sequence and all the Q 3;12 packets in an arbitrary sequence.
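Before turning to Phase 2.2 in detail, the following Python sketch simulates the head-of-line switching dynamics of Phase 2.1 under an i.i.d. spatially independent erasure model. Only the overhearing-set bookkeeping is simulated (payload coding is abstracted away), and the queue sizes and marginal probabilities are arbitrary illustrative values.
§ ILLUSTRATIVE PYTHON SKETCH OF THE PHASE 2.1 HEAD-OF-LINE DYNAMICS
import random

def simulate_phase_21(nq2, nq3, p, seed=0):
    # nq2, nq3: initial numbers of Q_{2;31} and Q_{3;21} packets.
    # p: marginal success probabilities {1: p1, 2: p2, 3: p3}, spatially independent.
    # A Q_{2;31} head is replaced once any of {d1, d2} receives the mix; a Q_{3;21} head is
    # replaced once any of {d1, d3} receives it. The loop stops when either queue empties
    # (under the dominance assumption, Q_{3;21} runs out first).
    random.seed(seed)
    remaining = {2: nq2, 3: nq3}
    slots = 0
    while remaining[2] > 0 and remaining[3] > 0:
        slots += 1
        S_rx = {i for i in (1, 2, 3) if random.random() < p[i]}   # destinations receiving this slot
        if S_rx & {1, 2}:
            remaining[2] -= 1    # head-of-line Q_{2;31} packet evolves; switch to the next one
        if S_rx & {1, 3}:
            remaining[3] -= 1    # head-of-line Q_{3;21} packet evolves; switch to the next one
    return slots, remaining

# Example with 1000 packets per queue and p1 = 0.7, p2 = 0.5, p3 = 0.3; on average each
# Q_{3;21} head lasts 1/p_union{1,3} slots and each Q_{2;31} head lasts 1/p_union{1,2} slots.
print(simulate_phase_21(1000, 1000, {1: 0.7, 2: 0.5, 3: 0.3}))
Phase 2.2 below follows the same switching rule with its own pair of queues.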
Following similar steps as in Phase 2.1, we first mix the head-of-the-line packets X 1,j1 and X 3,j3 of Q 1;32 and Q 3;12 , respectively, and then make the decisions of switching to the next-in-line packets X 1,j ′ 1 and X 3,j ′ 3 independently for the two queues Q 1;32 and Q 3;12 . We repeat the above process until we have used up all Q 3;12 packets X 3,j . Remark: We take a closer look at when a Q 3;12 packet X 3,j3 will be switched to the next-in-line packet X 3,j ′ 3 . By Line 4 of the UPDATE, we switch to the next-in-line X 3,j ′ 3 if and only if one of {d 2 , d 3 } has received the current packet v tx , in which X 3,j3 participates. Therefore, in average each X 3,j3 will stay in Phase 2.2 for number of Q 3;12 packets to begin with, it takes to completely finish the Q 3;12 packets. By similar arguments, it takes to completely use up the Q 1;32 packets. Since we assume that d 1 dominates d 3 , the dominance inequality in (14) implies that (24) is no smaller than (23). Therefore we indeed can finish the Q 3;12 packets before exhausting the Q 1;32 packets. Overall it takes roughly (23) such packets, which are termed the queue Q 2;13 packets. We order all the Q 1;23 packets in any arbitrary sequence and order all the Q 2;13 packets in any arbitrary sequence. Following similar steps as in Phases 2.1 and 2.2, we first mix the head-of-the-line packets X 1,j1 and X 2,j2 of Q 1;23 and Q 2;13 , respectively, and then make the decisions of switching to the next-in-line packets X 1,j ′ 1 and X 2,j ′ 2 independently for the two queues Q 1;23 and Q 2;13 . We repeat the above process until we have used up all Q 2;13 packets X 2,j . By the assumption that d 1 dominates d 2 and by the same arguments as in Phases 2.1 and 2.2, we indeed can finish the Q 2;13 packets before exhausting the Q 1;23 packets. Overall it takes roughly of time slots to finish Phase 2.3. Phase 3: Before the description of Phase-3 operations, we first summarize the status of all the packets in the end of Phase 2.3. For d 3 , all X 3,j packets that have S(X 3,j ) = ∅ have been used up in Phase 1.3. All X 3,j packets that have S(X 3,j ) = {1} have been used up in Phase 2.2. All X 3,j packets that have S(X 3,j ) = {2} have been used up in Phase 2.1. As a result, all the X 3,j packets are either received by d 3 (i.e., having 3 ∈ S(X 3,j )) or have S(X 3,j ) = {1, 2}. For Phase 3, we will focus on the latter type of X 3,j packets, which are termed the Q 3;12 packets. Recall the definition of f p (ST ) in (4). Totally, we have For d 2 , all X 2,j packets that have S(X 2,j ) = ∅ have been used up in Phase 1.2. All X 2,j packets that have S(X 2,j ) = {1} have been used up in Phase 2.3. As a result, all the X 2,j packets must satisfy one of the following: (i) X 2,j are received by d 2 (i.e., having 2 ∈ S(X 2,j )), or (ii) have S(X 2,j ) = {3}, or (iii) have S(X 2,j ) = {1, 3}. For Phase 3, we will focus on the latter two types of X 2,j packets, which are termed the Q 2;31 and the Q 2;13 packets, respectively. There are For d 1 , all X 1,j packets that have S(X 1,j ) = ∅ have been used up in Phase 1.1. As a result, all the X 1,j packets must satisfy one of the following: (i) X 1,j are received by d 1 (i.e., having 1 ∈ S(X 1,j )), or (ii) have S(X 1,j ) = {2}, (iii) have S(X 1,j ) = {3}, or (iv) have S(X 1,j ) = {2, 3}. For Phase 3, we will focus on the types (ii) and (iii), which are termed the Q 1;23 and the Q 1;32 packets, respectively. There are There are We are now ready to describe Phase 3, which contains 3 sub-phases. 
Phase 3.1: Similar to Phase 2.1, we choose T = {2, 3} for the PE scheme. In Phase 2.1, we chose the Q 2;31 packets X 2,j2 and the Q 3;21 packets X 3,j3 satisfying S(X 2,j2 ) = {3} and S(X 3,j3 ) = {2}. Since we have already used up all Q 3;21 packets in Phase 2.1, in Phase 3.1, we choose the Q 2;31 packets X 2,j2 and the new Q 3;12 packets X 3,j3 instead, such that the packets satisfy S(X 2,j2 ) = {3} and S(X 3,j3 ) = {1, 2}. Similar to Phase 2.1, we switch to the next-in-line packet as long as the S(X 2,j2 ) (or S(X 3,j3 )) is changed. Again, the decision whether to switch from X 2,j2 to the next-inline packet X 2,j ′ 2 is independent from the decision whether to switch from X 3,j3 to the next-in-line packet X 3,j ′ 3 . Note that, by Line 4 of the UPDATE, the S(X 2,j2 ) of a Q 2;31 packet X 2,j2 will change if and only if it is received by any one of {d 1 , d 2 }. Therefore, in average each Q 2;31 packet X 2,j2 will take 1 p ∪{1,2} number of time slots before we switch to the next-in-line packet X 2,j ′ 2 . For comparison, the S(X 3,j3 ) of a Q 3;12 packet X 3,j3 will change if and only if it is received by {d 3 }. Therefore, in average each Q 3;12 packet X 3,j3 will take 1 p3 number of time slots before we switch to the next-in-line packet X 3,j ′ 3 . We continue Phase 3.1 until we have finished all Q 2;31 packets. It is possible that we finish the Q 3;12 packets before finishing the Q 2;31 packets. In this case, we do not need to transmitting any Q 3;12 packets anymore and we use a degenerate T = {2} instead and continue Phase 3.1 by only choosing Q 2;31 packets X 2,j2 . Intuitively, Phase 3.1 is a cleanup phase that finishes the Q 2;31 packets that have not been used in Phase 2.1. While finishing up Q 2;31 packets, we also piggyback some Q 3;12 packets through network coding. If all Q 3;12 packets have been used up, then we continue sending pure Q 2;31 packets without mixing together any Q 3;12 packets. Since we have (29) number of Q 2;31 packets to begin with, it will take where the first, second, and the third terms correspond to the Q 2;13 packets generated in Phase 1.3, Phase 2.3, and Phase 2.1 plus Phase 3.1, respectively. We can further simplify (36) as Phase 3.2: After Phase 3.1, we move to Phase 3.2. Similar to Phase 3.1, Phase 3.2 serves the role of cleaning up the Q 1;32 packets that have not been used in Phase 2.2. More explicitly, we choose T = {1, 3}, and use the Q 1;32 packets X 1,j1 and the new Q 3;12 packets X 3,j3 , such that the packets satisfy S(X 1,j1 ) = {3} and S(X 3,j3 ) = {1, 2}. It is possible that all Q 3;12 packets have been used up in Phase 3.1. In this case, we do not need to transmitting any Q 3;12 packets anymore and we use a degenerate T = {1} instead and continue Phase 3.1 by only choosing Q 1;32 packets X 1,j1 . Similar to all previous phases, we switch to the next-inline packet as long as the S(X 1,j1 ) (or S(X 3,j3 )) is changed, and the decision whether to switch from X 1,j1 to the next-inline packet X 1,j ′ 1 is independent from the decision whether to switch from X 3,j3 to the next-in-line packet X 3,j ′ 3 . We continue Phase 3.2 until we have finished all Q 1;32 packets. Again, if we finish the Q 3;12 packets before finishing the Q 1;32 packets, then we stop transmitting any Q 3;12 packets, use a degenerate T = {1} instead, and continue Phase 3.2 by only choosing Q 1;32 packets X 1,j1 . By Line 4 of the UPDATE, the S(X 1,j1 ) of a Q 1;32 packet X 1,j1 will change if and only if it is received by any one of {d 1 , d 2 }. 
Therefore, in average each Q 1;32 packet X 1,j1 will take 1 p ∪{1,2} number of time slots before we switch to the next-in-line packet X 1,j ′ 1 . Since we have (34) number of Q 1;32 packets to begin with, it will take Similar to all previous phases, we switch to the next-inline packet as long as the S(X 1,j1 ) (or S(X 2,j2 )) is changed, and the decision whether to switch from X 1,j1 to the next-inline packet X 1,j ′ 1 is independent from the decision whether to switch from X 2,j2 to the next-in-line packet X 2,j ′ 2 . We continue Phase 3.3 until we have finished all Q 1;23 packets. If we finish the Q 2;13 packets before finishing the Q 1;23 packets, then we stop transmitting any Q 2;13 packets, use a degenerate T = {1} instead, and continue Phase 3.3 by only choosing Q 1;23 packets X 1,j1 . By Line 4 of the UPDATE, the S(X 1,j1 ) of a Q 1;23 packet X 1,j1 will change if and only if it is received by any one of {d 1 , d 3 }. Therefore, in average each Q 1;23 packet X 1,j1 will take 1 p ∪{1,3} number of time slots before we switch to the next-in-line packet X 1,j ′ 1 . Since we have (32) number of Q 1;23 packets to begin with, it will take number of time slots to finish Phase 3.3. Phase 4: We first summarize the status of all the packets in the end of Phase 3.3. For d 3 , all the X 3,j packets are either received by d 3 (i.e., having 3 ∈ S(X 3,j )) or have S(X 3,j ) = {1, 2}, the Q 3;12 packets. By Line 4 of the UPDATE, the S(X 3,j3 ) of a Q 3;12 packet X 3,j3 will change if and only if it is received by d 3 . Therefore, in average each Q 3;12 packet X 3,j3 will take 1 p3 number of time slots before we switch to the next-in-line packet X 3,j ′ 3 . Since the Q 3;12 packets participate in Phases 3.1 and 3.2, in the end of Phase 3.3, the total number of Q 3;12 packets becomes where (·) + = max(·, 0) is the projection to the non-negative reals. For d 2 , all X 2,j packets that have S(X 2,j ) = ∅ or S(X 2,j ) = {1} have been used up in Phase 1.2 or Phase 2.3, respectively. All X 2,j packets that have S(X 2,j ) = {3} have been used up in Phases 2.1 and 3.1. As a result, all the X 2,j packets are either received by d 2 (i.e., having 2 ∈ S(X 2,j )) or have S(X 2,j ) = {1, 3}, the Q 2;13 packets. By Line 4 of the UPDATE, the S(X 2,j2 ) of a Q 2;13 packet X 2,j2 will change if and only if it is received by d 2 . Therefore, in average each Q 2;13 packet X 2,j2 will take 1 p2 number of time slots before we switch to the next-in-line packet X 2,j ′ 2 . Since the Q 2;13 packets also participate in Phase 3.3, in the end of Phase 3.3, the total number of Q 2;13 packets becomes For d 1 , all X 1,j packets that have S(X 1,j ) = ∅, S(X 1,j ) = {2}, and S(X 1,j ) = {3} have been used up in Phases 1.1, 2.3+3.3, and 2.2+3.2, respectively. As a result, all the X 1,j packets are either received by d 1 (i.e., having 1 ∈ S(X 1,j )) or have S(X 1,j ) = {2, 3}, the Q 1;23 packets. In the end of Phase 3.3, the total number of Q 1;23 packets is where the first, second, and the third terms correspond to the Q 1;23 packets generated in Phase 1.1, 2.3+3.3, and 2.2+3.2, respectively. We can further simplify (42) as In Phase 4, since the only remaining packets (that still need to be retransmitted, see Lemma 3) are the Q 1;23 , Q 2;13 , and Q 3;12 packets, we always choose T = {1, 2, 3} and randomly and linearly mix the Q 1;23 , Q 2;13 , and Q 3;12 packets (one from each queue) for each time slot. That is, we use Phase 4 to clean up the remaining packets. 
Since in average a Q i;{1,2,3}\i packet X i,j takes 1 pi amount of time before it is received by d i , Phase 4 thus takes max Eq.(43) number of time slots to finish. More precisely, as time proceeds, we need to gradually switch to a degenerate T . For example, if the Q 2;13 packets are used up first, then we set the new T = {1, 3} and focus on mixing the remaining Q 1;23 and Q 3;12 packets. After (44) number of time slots, it is thus guaranteed that for sufficiently large n, all information packets X k,j , k ∈ {1, 2, 3}, and j ∈ satisfy k ∈ S(X k,j ). By Corollary 2, all d k can decode the desired packets X k,j , j ∈ with close-to-one probability. Quantify the throughput of the 4-phase scheme: The remaining task is to show that if (R 1 , R 2 , R 3 ) is in the interior of the outer bound in Proposition 1, then the total number of time slots used by the above 4-Phase PE scheme is within the time budget n time slots. That is, we need to prove that The summation of the first nine terms of the left-hand side of (45) can be simplified to where A 1.1-3.3 is the total number of time slots in Phases 1.1 to 3.3. Since (44) is the maximum of three terms, proving (45) is thus equivalent to proving that the following three inequality hold simultaneously. With direct simplification of the expressions, proving the above three inequalities is equivalent to proving B. Achieving the Capacity of 1-to-3 Broadcast PECs With COF -High-Level Discussion As discussed in Section V-A, one advantage of a PE scheme is that although different packets X k,j k and X i,ji with k = i may be mixed together, the corresponding evolution of X k,j k (the changes of S(X k,j k ) and v(X k,j k )) are independent from the evolution of X i,ji . Also by Lemma 2, two different packets X k,j k and X i,ji can share the same time slot without interfering each other as long as i ∈ S(X k,j k ) and k ∈ S(X i,ji ). These two observations enable us to convert the achievability problem of a PE scheme to the following "time slot packing problem." Let us focus on the (s, d 1 ) session. For any X 1,j packet, initially S(X 1,j ) = ∅. Then as time proceeds, each X 1,j starts to participate in packet transmission. The corresponding S(X 1,j ) evolves to different values, depending on the set of destinations that receive the transmitted packet in which X 1,j participates. Since in this subsection we focus mostly on S(X 1,j ), we sometimes use S(X) as shorthand if it is unambiguous from the context. Fig. 4 describes how S(X) evolves between different values. In Fig. 4, we use circles to represent the five different states according to the S(X) value. Recall that S rx is the set of destinations who successfully receive the transmitted coded packet. The receiving set S rx decides the transition between different states. In Fig. 4, we thus mark each transition arrow (between different states) by the value(s) of S rx that enables the transition. For example, by Line 4 of the UPDATE, when the initial state is S(X) = ∅, if the receiving set S rx ∋ 1, then the new set satisfies S(X) ∋ 1. Similarly, when the initial state is S(X) = ∅, if S rx = {2, 3}, then the new S(X) becomes S(X) = {2, 3}. (Note that the corresponding v(X 1,j ) also evolves over time to maintain the non-interfering property in Lemma 2, which is not illustrated in Fig. 4 .) Since S(X 1,j ) ∋ 1 if and only if d 1 receives X 1,j , it thus takes nR1 p1 logical time slots to finish the transmission of nR 1 information packets. 
On the other hand, some logical time slots for the (s, d 1 ) session can be "packed/shared" jointly with the logical time slots for the (s, d k ) sessions, k ≠ 1, or, equivalently, one physical time slot can serve two sessions simultaneously. For the following, we quantify how many logical time slots of the (s, d 1 ) session are compatible with those of the other sessions. For any S 0 ⊆ {1, 2, 3}, let A 1;S0 denote the number of logical time slots (out of the total nR 1 /p 1 logical time slots) during which the transmitted X 1,j has S(X 1,j ) = S 0 . Initially, there are nR 1 packets X 1,j . If any one of {d 1 , d 2 , d 3 } receives the transmitted packet (equivalently, S rx ≠ ∅), then S(X 1,j ) becomes non-empty. Therefore, each X 1,j contributes 1/p ∪{1,2,3} logical time slots with S(X 1,j ) = ∅. We thus have A 1;∅ = nR 1 /p ∪{1,2,3} , which is (46). We also note that during the evolution process of X 1,j , if any one of {d 1 , d 3 } receives the transmitted packet (equivalently, S rx ∩ {1, 3} ≠ ∅), then the S(X) value will move from one of the two states "S(X) = ∅" and "S(X) = {2}" to one of the three states "S(X) = {3}," "S(X) = {2, 3}," and "S(X) ∋ 1." Therefore, each X 1,j contributes 1/p ∪{1,3} logical time slots for which we either have S(X 1,j ) = ∅ or S(X 1,j ) = {2}. By the above reasoning, we have A 1;∅ + A 1;{2} = nR 1 /p ∪{1,3} , which is (47). Similarly, during the evolution process of X 1,j , if any one of {d 1 , d 2 } receives the transmitted packet (equivalently, S rx ∩ {1, 2} ≠ ∅), then the S(X) value will move from one of the two states "S(X) = ∅" and "S(X) = {3}" to one of the three states "S(X) = {2}," "S(X) = {2, 3}," and "S(X) ∋ 1." Therefore, each X 1,j contributes 1/p ∪{1,2} logical time slots for which either S(X 1,j ) = ∅ or S(X 1,j ) = {3}. By the above reasoning, we have A 1;∅ + A 1;{3} = nR 1 /p ∪{1,2} , which is (48). Before S(X) evolves to the state "S(X) ∋ 1," any logical time slot contributed by such an X must be in one of the following four states: "S(X) = ∅," "S(X) = {2}," "S(X) = {3}," and "S(X) = {2, 3}." As a result, we must have A 1;∅ + A 1;{2} + A 1;{3} + A 1;{2,3} = nR 1 /p 1 , which is (49). Solving (46), (47), (48), and (49), we obtain the values of A 1;∅ , A 1;{2} , A 1;{3} , and A 1;{2,3} (a small numerical sketch of this bookkeeping is given below). We can also define A k;S0 as the number of logical time slots of the (s, d k ) session with S(X k,j k ) = S 0 . By similar derivation arguments, we obtain the corresponding values of A 2;S0 and A 3;S0 . Recall that by definition, A k;S0 is the number of logical time slots of the (s, d k ) session that are compatible with the logical time slots of the (s, d i ) sessions with i ∈ S 0 . The achievability problem of a PE scheme thus becomes the following time slot packing problem. Consider 12 types of logical time slots, each type denoted by (k; S 0 ) for some k ∈ {1, 2, 3} and S 0 ⊆ {1, 2, 3} with k ∉ S 0 . The numbers of logical time slots of each type are described in (50) to (61). Two logical time slots of types (k 1 ; S 1 ) and (k 2 ; S 2 ) are compatible if k 1 ≠ k 2 , k 1 ∈ S 2 , and k 2 ∈ S 1 . Any compatible logical time slots can be packed together in the same physical time slot. For example, consider the following types of logical time slots: (1; {2, 3}), (2; {1, 3}), and (3; {1, 2}). Three logical time slots, one from each type, can occupy the same physical time slot since any two of them are compatible with each other. The time slot packing problem is thus: Can we pack all the logical time slots within n physical time slots? The detailed 4-phase PE scheme in Section V-A thus corresponds to the time-slot-packing policy depicted in Fig. 5, in which Phase 4 takes the number of time slots given in (62).
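The following Python sketch carries out the bookkeeping of (46)-(49) numerically for the (s, d 1 ) session, assuming a spatially independent channel so that p ∪S = 1 − Π i∈S (1 − p i ); the rate and probability values are illustrative only.
§ ILLUSTRATIVE PYTHON SKETCH OF THE LOGICAL-SLOT ACCOUNTING (46)-(49)
def p_union(S, p):
    # Probability that at least one destination in S receives the packet (spatial independence).
    miss = 1.0
    for i in S:
        miss *= 1.0 - p[i]
    return 1.0 - miss

def logical_slots_session1(nR1, p):
    # Solve (46)-(49) for the logical-slot counts A_{1;S0} of the (s, d1) session.
    A_empty = nR1 / p_union({1, 2, 3}, p)               # (46)
    A_2 = nR1 / p_union({1, 3}, p) - A_empty            # (47)
    A_3 = nR1 / p_union({1, 2}, p) - A_empty            # (48)
    A_23 = nR1 / p[1] - A_empty - A_2 - A_3             # (49)
    return {'empty': A_empty, '{2}': A_2, '{3}': A_3, '{2,3}': A_23}

# Illustration with n*R1 = 1000 and the marginals p1 = 0.7, p2 = 0.5, p3 = 0.3.
print(logical_slots_session1(1000, {1: 0.7, 2: 0.5, 3: 0.3}))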
Depending on which of the three terms in (62) is the largest, the total number of physical time slots is one of the following three expressions: By (50) to (61), one can easily check that all three equations are less than n for any (R 1 , R 2 , R 3 ) in the interior of the outer bound of Proposition 1, which answers the time-slotpacking problem in an affirmative way. One can also show that the packing policy in Fig. 5 is the tightest among any other packing policy, which indeed corresponds to the capacityachieving PE scheme described in Section V-A. C. The Achievability Results of General 1-to-M Broadcast PECs With COF In Section V-B, we show how to reduce the achievability problem of a PE scheme to a time-slot-packing problem. However, the converse may not hold due to the causality constraint of the PE scheme. By taking into account the causality constraint, the time-slot-packing arguments can be used to generate new achievable rate inner bounds for general 1-to-M broadcast PECs with COF, which will be discussed in this subsection. One major difference between the tightest solution of the time-slot-packing problem in Fig. 5 and the detailed PE scheme in Section V-A is that for the former, we can pack the time slots in any order. There is no need to first pack those logical time slots that cannot be shared with any other time slots. Any packing order will result in the same amount of physical time slots in the end. On the other hand, for the PE scheme it is critical to perform the 4 phases (10 sub-phases) in sequence since many packets used in the later phase are generated by the previous phases. For example, all the packets in Phases 2 to 4 are generated in Phases 1. The causality constraints for a 1-to-M PEC with M ≥ 4 quickly become complicated due to the potential cyclic depen-dence 5 of the problem. To simplify the derivation, we consider the following sequential acyclic construction of PE schemes, which allows tractable performance analysis but at the cost of potentially being throughput suboptimal. As will be seen in Section VI-D, for most PEC channel parameters, the proposed sequential acyclic PE schemes are sufficient to achieve the channel capacity. To design a sequential PE scheme, we first observe that in the capacity-achieving 4-Phase PE scheme in Section V-A, we always start from mixing a small subset T then gradually move to mixing a larger subset T . The intuition behind is that when mixing a small set, say T = {2, 3} in Phase 2.1, we can create more coding opportunities in the later Phase 4 when T = {1, 2, 3}. Recall the definition of cardinality-compatible total ordering ≺ on 2 in (5). For a sequential PE scheme, we thus choose the mixing set T from the smallest to the largest according to the given cardinality-compatible total ordering. The detailed algorithm of choosing T and the target packets X k,j k , k ∈ T , is described as follows. There are (2 K − 1) phases and each phase is indexed by a non-empty subset T ⊆ . We move sequentially between phases according to the cardinality-compatible total ordering ≺. That is, if T 1 ≺ T 2 and there is no other subset T 3 satisfying T 1 ≺ T 3 ≺ T 2 , then after the completion of Phase T 1 , we move to Phase T 2 . Consider the operation in Phase T . Recall that the basic properties of the PE scheme allow us to choose the target packets X k,j k independently for all k ∈ T . In Phase T , consider a fixed k ∈ T . Let S k = T \k. 
We first choose a Q k;S k packet X k,j k , i.e., those with S(X k,j k ) = S k , and keep using this packet for transmission, which will be mixed with packets from other sessions according to Line 7 of the PE scheme. Whenever the current X k,j k packet evolves (the corresponding S(X k,j k ) changes), we move to the next Q k;S k packet X k,j ′ k . Continue this process for a pre-defined amount of time slots. We use w k;S k →S k to denote the number of time slots in which we choose a Q k;S k packet. After w k;S k →S k number of time slots, we are still in Phase T but we will start to choose a different Q k;S k packet X k,j k (i.e., with S(X k,j k ) =S k ), which will be mixed with packets from other sessions in T . 5 For general 1-to-M PECs with M ≥ 4, we may have the following cyclic dependence relationship: Packet mixing in Phase A needs to use the packets generated by the packet mixing during Phase B. Packing mixing in Phase B needs the packets resulted from the packet mixing during Phase C. But the packing mixing of Phase C also needs the packets resulted from the packing mixing in Phase A. Quantifying such a cyclic dependence relationship with causality constraints is a complicated problem. More explicitly, we choose a sequence ofS k such that allS k satisfy S k ⊆S k ⊆ ( \k), which guarantees that such new X k,j k with S(X k,j k ) =S k is still non-interfering from the perspectives of all other sessions in T . The order we choose theS k follows that of the total ordering ≺. The closerS k is to S k , the earlier we use suchS k . For any chosenS k , we choose a Q k;S k packet X k,j k , i.e., those with S(X k,j k ) =S k , and keep using this packet to generate coded packets for transmission. Whenever the current X k,j k packet evolves (the corresponding S(X k,j k ) changes), we move to the next Q k;S k packet X k,j ′ k . Continue this process for a pre-defined amount of time slots. We use w k;S k →S k to denote the number of time slots in which we choose a Q k;S k packet. That is, w k;S k →S k is the number of time slots that we are using a Q k;S k packet in substitute for a Q k;S k packet, which is similar to the operations in Phases 3.1 to 3.3. After w k;S k →S k number of time slots, we are still in Phase T but we will move to the next eligibleS k according to the total ordering ≺. Continue this process until allS k have been used. Since we choose the target packet X k,j k independently for all k, Phase T thus takes (67) Note that (63) to (67) are similar to (8) to (12) of the achievability inner bound in Proposition 3. The only differences are (i) The new scaling factor n in (64) and (65) when compared to (8) and (10); (ii) The use of the max operation in (63) when compared to (9); and (iii) The equality "=" in (65) and (66) instead of the inequality "≥" in (10) and (11). The first two differences (i) and (ii) are simple restatements and do not change the feasibility region. The third difference (iii) can be reconciled by sending auxiliary dummy (all-zero) packets in the PE scheme as will be clear in the following proof. As a result, we focus on proving the existence of a feasible sequential PE scheme provided the new inequalities (63) to (67) are satisfied. Assuming sufficiently large n, the law of large numbers ensures that all the following discussion are accurate within the precision o(n), which is thus ignored for simplicity. (64) implies that we can finish all the phases within n time slots. 
Since each Q k;∅ packet X k,j k in average needs 1 p ∪ time slots before its S(X k,j k ) evolves to another value, (65) ensures that after Phase {k}, all Q k;∅ packets have been used up and evolved to a different Q k;S packet. Suppose that we are currently in Phase (T ∪ {k}) for some k / ∈ T , and suppose that we just finished choosing the Q k;S ′ packet for some old S ′ and are in the beginning of choosing a new Q k;S packet (with a new S = S ′ ) that will subsequently be mixed with packets from other sessions. By Line 4, each Q k;S packet evolves to a different packet if and only if one of the d i with i ∈ ( \S) receives the coded transmission. Therefore, sending Q k;S packets for w k;S→T number of time slots will consume additional w k;S→T · p ∪( \S) number of Q k;S packets. Similarly, the previous phases (T 1 number of Q k;S packets. The left-hand side of (67) thus represents the total number of Q k;S packets that have been consumed after finishing the w k;S→T number of time slots of Phase (T ∪{k}) sending Q k;S packets. As will be shown short after, the right-hand side of (67) represents the total number of Q k;S packets that have been created until the current time slot. As a result, (67) corresponds to a packet-conservation law that limits the largest number of Q k;S packets that can be used in Phase (T ∪ {k}). To show that the right-hand side of (67) represents the total number of Q k;S packets that have been created, we notice that the Q k;S packets can either be created within the current Phase (T ∪ {k}) but during the previous attempts of sending Q k;S1 packets in Phase (T ∪ {k}) with S 1 ≺ S; or be created in the previous phases (T 1 ∪{k}) with (T 1 ∪{k}) ≺ (T ∪{k}). The former case corresponds to the first term on the right-hand side of (67) and the latter case corresponds to the second term on the right-hand side of (67). For the former case, for each time slot in which we transmit a Q k;S1 packet in Phase (T ∪ {k}), there is some chance that the packet will evolve into a Q k;S packet. More explicitly, by Line 4 of the UPDATE, a Q k;S1 packet in Phase (T ∪ {k}) evolves into a Q k;S packet if and only if the packet is received by all d i with i ∈ (S\T ) and not by any d i with i ∈ ( \S). As a result, each such time slot will create f p ((S\T )( \S)) number of Q k;S packet in average. Since we previously sent Q k;S1 packets for a total w k;S1→T number of time slots, the first term of the right-hand side of (67) is indeed the number of Q k;S packets created within the current Phase (T ∪ {k}) but during the previous attempts of sending Q k;S1 packets. For the latter case, for each time slot in which we transmit a Q k;S1 packet in Phase (T 1 ∪ {k}), there is some chance that the packet will evolve into a Q k;S packet, provided we have T 1 ⊆ S and S S 1 . More explicitly, by Line 4 of the UPDATE, a Q k;S1 packet in Phase (T 1 ∪ {k}) evolves into a Q k;S packet if and only if Therefore, for any (S 1 , T 1 ) pair satisfying T 1 ⊆ S and S S 1 , a Q k;S1 packet in Phase (T 1 ∪ {k}) will have f p ((S\T 1 )( \S)) probability to evolve into a Q k;S packet. Since we previously sent Q k;S1 packets in Phase (T 1 ∪ {k}) for a total w k;S1→T1 number of time slots, the second term of the right-hand side of (67) is indeed the number of Q k;S packets created during the attempts of sending Q k;S1 packets in the previous Phase (T 1 ∪ {k}). Suppose that we are currently in Phase (S ∪ {k}) for some k / ∈ S. 
To justify (66), we first note that in the sequential PE construction we only select the packets X k,j with k / ∈ S(X k,j ). By Line 4 of the UPDATE, each packet X k,j transmitted in Phase T is either received by the intended destination d k , or it will evolve into a new S(X k,j ) that is a proper superset of (T \k). As a result, the cardinality-compatible total ordering "≺" ensures that once we are in Phase (S ∪ {k}), any subsequent Phase T with (S ∪ {k}) ≺ T will not create any new Q k;S packets. Therefore, if we can clean up all Q k;S packets in Phase (S∪{k}) for all S ⊆ ( \k), then in the end of the sequential PE scheme, there will be no Q k;S packets for any S ⊆ ( \k). This thus implies that all X k,j packets in the end must have S(X k,j ) ∋ k. By Lemma 3, decodability is thus guaranteed. (66) is the equation that guarantees that we can clean up all Q k;S packets in Phase (S ∪ {k}). By similar computation as in the discussion of the righthand side of (67), the right-hand side of (66) is the total number of Q k;S packets generated during the attempts of sending Q k;S1 packets in the previous Phase (T 1 . Similar to the computation in the discussion of the left-hand side of (67), there is number of Q k;S packets that have been used during the previous Phases (T 1 ∪ {k}). In the beginning of this phase, we send Q k;S→S packets for w k;S→S number of time slots, which can clean up additional number of Q k;S packets. Jointly, (68), (69), and (66) ensures that we can use up all Q k;S packets in Phase (S ∪ {k}). The above reasonings show that we can finish the transmission in n time slots, make all X k,j have S(X k,j ) ∋ k, and obey the causality constraints. Therefore, the corresponding sequential PE scheme is indeed a feasible solution. The proof of Proposition 3 is thus complete. D. Attaining The Capacity Of Two Classes of PECs In this section, we prove the capacity results for symmetric 1-to-K broadcast PECs in Proposition 4 and for spatially independent broadcast PECs with one-sided fairness constraints in Proposition 5. Proof of Proposition 4: Since the broadcast channel is symmetric, for any S 1 , S 2 ∈ 2 , we have Without loss of generality, also assume that R 1 ≥ R 2 ≥ · · · ≥ R K . By the above simplification, the outer bound in Proposi-tion 1 collapses to the following single linear inequality: We use the results in Proposition 3 to prove that (70) is indeed the capacity region. To that end, we first fix an arbitrary cardinality-compatible total ordering. Then for any S ⊆ ( \k), we choose and w k;S→T = 0 for all T being a proper subset of S. The symmetry of the broadcast PEC, the assumption that R 1 ≥ R 2 ≥ · · · ≥ R K , and (63) jointly imply that for all T = ∅. For completeness, we set x ∅ = 0. By simple probability arguments as first described 6 in Section V-B, we can show that the above choices of w k;S→T and x T are all non-negative and jointly satisfy the inequalities (9) to (12). The remaining task is to show that inequality (8) is satisfied for any (R 1 , · · · , R K ) in the interior of the capacity outer bound (70). To that end, we simply need to verify the following equalities by some simple arithmetic computation. Summing (72) over different k values, we thus show that any (R 1 , · · · , R K ) in the interior of the capacity outer bound (70) indeed satisfies (8). The proof of Proposition 4 is complete. Proof of Proposition 5: Consider an arbitrary spatially independent broadcast PEC with 0 < p 1 ≤ p 2 ≤ · · · ≤ p K . 
The capacity outer bound in Proposition 1 implies that any achievable rate vector (R 1 , · · · , R K ) must satisfy We use the results in Proposition 3 to prove that any onesidedly fair rate vector (R 1 , · · · , R K ) ∈ Λ osf that is in the interior of (73) is indeed achievable. To that end, we first fix an arbitrary cardinality-compatible total ordering. Then for any S ⊆ ( \k), we choose and w k;S→T = 0 for all T being a proper subset of S. By Lemma 5 in Appendix D and by (63), we have for all T = ∅. For completeness, we set x ∅ = 0. The remaining proof of Proposition 5 can be completed by following the same steps after (71) of the proof of Proposition 4. VI. FURTHER DISCUSSION OF THE MAIN RESULTS We provide some further discussion of the main results in this section. In particular, we focus on the accounting overhead of the PE schemes, the minimum finite field size of the PE schemes, the sum rate performance of asymptotically large M values, and numerical evaluations of the outer and inner bounds for general 1-to-K broadcast PECs. A. Accounting Overhead Thus far we assume that the individual destination d k knows the global coding vector v tx that is used to generate the coded symbols (see Line 10 of the main PE scheme). Since the coding vector v tx is generated randomly, this assumption generally does not hold, and the coding vector v tx also needs to be conveyed to the destinations. Otherwise, destinations d k cannot decode the original information symbols X k,j for the received coded symbols Z k (t), t ∈ . The cost of sending the coding vector v tx is termed the coding overhead or the accounting overhead. We use the generation-based scheme in to average out and absorb the accounting overheard. Namely, we first choose sufficiently large n and finite field size q such that the PE scheme can achieve (1 − ǫ)-portion of the capacity with arbitrarily close-to-one probability when assuming there is no accounting overhead. Once the n and q values are fixed, we choose an even larger finite field GF(q M+ K k=1 nR k ) for some large integer M . The large finite field is then treated as a vector of dimension M + K k=1 nR k . Although each information symbol (vector) is chosen from X k,j ∈ GF(q M+ K k=1 nR k ), we limit the range of the X k,j vector value such that the first K k=1 nR k coordinates are always zero, i.e., no information is carried in the first K k=1 nR k coordinates. We can thus view the entire systems as sending M coordinates in each vector. During the transmission of the PE scheme, we focus on coding over each coordinate, respectively, rather than jointly coding over the entire vector. The same coding vector v tx is used repeatedly to encode the last M coordinates. And we use the first K k=1 nR k coordinates to store the coding vector v tx . Since only the last M coordinates are used to carry information, overall the transmission rate is reduced by a factor M M+ K k=1 nR k . By choosing a sufficiently large M , we have averaged out and absorbed the accounting overhead. B. Minimum Finite Field Size The PE scheme in Section IV is presented in the context of random linear network coding, which uses a sufficiently large finite field size GF(q) and proves that the desired properties hold with close-to-one probability. The main advantage of this random-coding-based description is that the entire algorithm can be carried out in a very efficient and distributed fashion. 
For example, with a sufficiently large q, the source s only needs to bookkeep the S(X k,j ) and v(X k,j ) values of all the information packets X k,j . All the coding and update computations are of linear complexity. On the other hand, the drawback of a randomized algorithm is that even with very large GF(q), there is still a small probability that after the termination of the PE algorithm, some destination d k has not accumulated enough linearly independent packets to decode the desired symbols X k,1 to X k,nR k . For the following, we discuss how to covert the randomized PE scheme into a deterministic algorithm by quantifying the corresponding minimum size of the finite field. Proposition 6: Consider the 1-to-K broadcast PEC problem with COF. For any fixed finite field GF(q 0 ) satisfying q 0 > K, all the achievability results in Propositions 2, 3, 4, and 5 can be attained by a deterministic PE algorithm on GF(q 0 ) that deterministically computes the mixing coefficients {c k : ∀k ∈ T } in Line 7 of the PE scheme. The proof of Proposition 6 is relegated to Appendix C. Remark 1: In practice, the most commonly used finite field is GF(2 8 ). Proposition 6 guarantees that GF(2 8 ) is sufficient for coding over K ≤ 255 sessions together. Remark 2: On the other hand, the construction of good mixing coefficients {c k : ∀k ∈ T } in Proposition 6 is computationally intensive. The randomized PE scheme has substantial complexity advantage over the deterministic PE scheme. C. The Asymptotic Sum-Rate Capacity of Large M Values We first define the sum-rate capacity as follows: Definition 9: The sum-rate capacity R * sum is defined as Proposition 5 quickly implies the following corollary. Corollary 3: Consider any spatially independent 1-to-K broadcast PECs with marginal success probabilities 0 < p 1 ≤ p 2 ≤ · · · ≤ p K < 1. With COF, the sum-rate capacity satisfies K k=1 If we further enforce perfect fairness, i.e., R 1 = R 2 = · · · = R K , then the corresponding sum-rate capacity R * sum,perf.fair becomes R * sum,perf.fair = . Proof: Since the sum-rate capacity nR * sum is no larger than the total available time slots n, we have the upper bound R * sum ≤ 1. Since the rate vector is one-sidedly fair, Proposition 5 leads to the lower bound of R * sum . Since a perfectly fair rate vector (R, R, · · · , R) is also one-sidedly fair, Proposition 5 gives the exact value of R * sum,perf.fair . Corollary 3 implies the following. Consider any fixed p > 0. Consider a symmetric, spatially independent 1-to-K broadcast PEC with marginal success probability p 1 = p 2 = · · · = p K = p. When K is sufficiently large, both the sum-rate capacities R * sum and R * sum,perf.fair approach one. That is, for sufficiently large K, network coding completely removes all the channel uncertainty by taking advantage of the spatial diversity among different destinations d i . Therefore, each (s, d k ) session can sustain rate 1−ǫ K for some ǫ > 0 where ǫ → 0 when K → ∞. Note that when compared to the MIMO capacity gain, the setting in this paper is more conservative in a sense that it assumes that the channel gains change independently from time slot to time slot (instead of block fading) while no coordination is allowed among destinations. This relationship was first observed and proven in by identifying a lower bound of R * sum,perf.fair for symmetric, spatially independent PECs. 
Compared to the results in , Corollary 3 characterizes the exact value of $R^*_{\text{sum,perf.fair}}$ and provides a tighter lower bound on $R^*_{\text{sum}}$ for non-symmetric spatially independent PECs. The value of $R^*_{\text{sum,perf.fair}}$ will later be evaluated numerically in Section VI-D for non-symmetric spatially independent PECs.

Fig. 6 illustrates the 3-dimensional capacity region of $(R_1, R_2, R_3)$ for a spatially independent, 1-to-3 broadcast PEC with COF. The corresponding marginal probabilities are $p_1 = 0.7$, $p_2 = 0.5$, and $p_3 = 0.3$. The six facets in Fig. 6 correspond to the six different permutations used in Proposition 1.

D. Numerical Evaluation

For general 1-to-K PECs with K ≥ 4, we can use the outer and inner bounds in Propositions 1 and 3 to bracket the actual capacity region. Since there is no tightness guarantee for K ≥ 4 except for the two special classes of channels in Section III-B, we use a computer to numerically evaluate the tightness of the outer and inner bound pairs. To that end, for any fixed K value, we consider a spatially independent 1-to-K broadcast PEC with the marginal success probabilities $p_k$ chosen randomly from (0, 1). To capture the K-dimensional capacity region, we first choose a search direction $v = (v_1, \cdots, v_K)$ uniformly at random from a K-dimensional unit ball. With the chosen values of $p_k$ and v, we use a linear programming (LP) solver to find the largest $t_{\text{outer}}$ such that $(R_1, \cdots, R_K) = (v_1 \cdot t_{\text{outer}}, \cdots, v_K \cdot t_{\text{outer}})$ satisfies the capacity outer bound in Proposition 1. To evaluate the capacity inner bound, we need to choose a cardinality-compatible total ordering. For any set $S \subseteq \cdots$ as predicted by Corollary 3.

We are also interested in the sum rate capacity under asymmetric channel profiles (also known as heterogeneous channel profiles). Consider asymmetric, spatially independent PECs. For each p value, we let the channel gains $p_1$ to $p_K$ be equally spaced within (p, 1), i.e., $p_k = p + (k-1)\frac{1-p}{K-1}$. We then plot the sum rate capacities for different p values. Fig. 9 describes the case of K = 6. The sum rate capacities are depicted by solid curves, which are obtained by solving the linear inequalities in the outer and inner bounds of Propositions 1 and 3. For all the parameter values used to plot Fig. 9, the outer and inner bounds meet, and we thus have the exact sum rate capacities for the case of K = 6. The best achievable rates of time sharing are depicted by dashed curves in Fig. 9. We consider both a perfectly fair system $(R, R, \cdots, R)$ and a proportionally fair system $(p_1 R, p_2 R, \cdots, p_K R)$, for which the rate of the $(s, d_k)$ session is proportional to the marginal success probability $p_k$ (the optimal rate when all other sessions are silent). To highlight the impact of channel heterogeneity, we also redraw the curves of perfectly symmetric PECs with $p_1 = \cdots = p_K = p$. As seen in Fig. 9, for perfectly fair systems, the sum-rate capacity gain does not increase much when moving from symmetric PECs $p_1 = \cdots = p_K = p$ to the heterogeneous channel profile with $p_1$ to $p_K$ evenly spaced within (p, 1). The reason is that the worst user $d_1$ (with the smallest $p_1$) dominates the system performance in a perfectly fair system. When we allow proportional fairness, network coding again provides a substantial improvement for all p values. However, the gain is not as large as in the case of symmetric channels. For example, when $p_1$ to $p_K$ are evenly spaced within (0, 1), the sum rate capacity of a proportionally fair system is 0.56 (the p = 0 point); a rough sketch of the outer-bound computation used in these numerical evaluations is given below, after which the comparison continues.
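The sketch assumes that the outer bound of Proposition 1 consists of one linear inequality per permutation $\pi$ of the destinations, of the standard weighted-sum form $\sum_{k=1}^{K} R_{\pi(k)} / \bigl(1 - \prod_{j \le k}(1-p_{\pi(j)})\bigr) \le 1$ for spatially independent channels; that exact form is an assumption here (the proposition itself is not reproduced in this excerpt), the search direction is restricted to the nonnegative orthant for simplicity, and with a single scaling variable the LP reduces to a direct minimum over permutations.

import itertools
import numpy as np

# Hypothetical instance; the per-permutation constraint form above is assumed, not quoted.
K = 4
rng = np.random.default_rng(0)
p = rng.uniform(0.1, 0.9, size=K)          # random marginal success probabilities
v = np.abs(rng.normal(size=K))
v /= np.linalg.norm(v)                     # random search direction in the nonnegative orthant

t_outer = np.inf
for perm in itertools.permutations(range(K)):
    erased_so_far = 1.0                    # prod_{j <= k} (1 - p_{pi(j)})
    coeff = 0.0
    for k in perm:
        erased_so_far *= (1.0 - p[k])
        coeff += v[k] / (1.0 - erased_so_far)
    t_outer = min(t_outer, 1.0 / coeff)    # largest t with t * coeff <= 1 for this permutation

R_point = v * t_outer
print("outer-bound point along v:", np.round(R_point, 4), " sum rate:", round(float(R_point.sum()), 4))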
However, if all $p_1$ to $p_K$ are concentrated on their mean 0.5, then the sum rate capacity of the symmetric channel (p = 0.5) is 0.79. These results show that, for practical implementation, it is better to group together all the sessions with similar marginal success rates and perform intersession network coding within the same group.

We also repeat the same experiment of Fig. 9 but for the case K = 20 in Fig. 10. In this case of a moderately sized K = 20, the sum-rate capacity of a perfectly fair system is characterized by Proposition 5. On the other hand, the sum-rate capacity of a proportionally fair system is characterized by Proposition 5 only when all $p_1$ to $p_K$ are in the range of $\cdots$ (see the discussion of one-sidedly fair systems in Section V-D). Since the evaluations of both the outer and inner bounds have prohibitively high complexity for the case K = 20, we use the capacity formula of Proposition 5 as a substitute⁷ for the sum-rate capacity for p < 0.5, which is illustrated in Fig. 10 by the fine dotted extension of the solid curve for the region $p \in \cdots$. Again, the more sessions (K = 20) that are encoded together, the higher the network coding gain over the best time-sharing rate.

VII. CONCLUSION

The recent development of practical network coding schemes has brought attention back to the study of packet erasure channels (PECs), which are a generalization of the classic binary erasure channel. Since per-packet feedback (such as ARQ) is widely used in today's network protocols, it is of critical importance to study PECs with channel output feedback (COF). This work has focused on deriving the capacity of general 1-to-K broadcast PECs with COF, which was previously known only for the case K = 2. We have proposed a new class of intersession network coding schemes, termed the packet evolution (PE) schemes, for broadcast PECs. Based on the PE schemes, we have derived the capacity region for general 1-to-3 broadcast PECs, and a pair of capacity outer and inner bounds for general 1-to-K broadcast PECs, both of which can be easily evaluated by any linear programming solver for the cases K ≤ 6. It has also been proven that the outer and inner bounds meet for two classes of 1-to-K broadcast PECs: the symmetric broadcast PECs, and the spatially independent broadcast PECs with one-sided fairness rate constraints. Extensive numerical experiments have shown that the outer and inner bounds meet for almost all broadcast PECs encountered in practical scenarios. Therefore, we can effectively use the outer/inner bounds as a substitute for the capacity region in practical applications. The capacity results in this paper also show that for large K values, the noise of the broadcast PECs can be effectively removed by exploiting the inherent spatial diversity of the system, even without any coordination between the destinations. For practical implementation, the COF usually arrives in batches. That is, instead of instant per-packet COF, we usually have periodic, per-batch COF. The PE scheme can be modified to incorporate periodic COF as well. The corresponding discussion and a preliminary empirical implementation of the revised PE scheme can be found in .

ACKNOWLEDGMENT

This work was supported in part by NSF grants CCF-0845968 and CNS-0905331. The author would also like to thank Profs. Anant Sahai and David Tse for their insightful suggestions.
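A small counting fact is used repeatedly in the appendix proofs that follow (for Lemmas 2-3 and Proposition 6): a mixing coefficient $c_k$ drawn uniformly from GF(q) avoids any single fixed value with probability $(q-1)/q$. Spelled out,

\Pr\bigl((c_k+\alpha)\neq 0\bigr) \;=\; 1-\Pr(c_k=-\alpha) \;=\; 1-\frac{1}{q} \;=\; \frac{q-1}{q} \qquad \text{for any fixed } \alpha\in GF(q),

and a union bound over at most K such events is what yields the $q_0 > K$ requirement in Proposition 6.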
Since S 0 (X k,j ) = ∅ for all X k,j and the only d i satisfying i ∈ (S 0 (X k,j ) ∪ {k}) is d k , we only need to check whether v 0 (X k,j ) is in the linear space span(Ω Z,k (0), Ω M,k ). Note that in the end of time 0, v 0 (X k,j ) is the elementary vector δ k,j ∈ Ω M,k . Lemma 2 thus holds in the end of time 0. Suppose Lemma 2 is satisfied in the end of time (t − 1). Consider the end of time t. We use T to denote the subset chosen in the beginning of time t and use {X k,j k : ∀k ∈ T } to denote the corresponding target packets. Consider the following cases: Case 1: Consider those X k,j k such that S t (X k,j k ) = S t−1 (X k,j k ). We first note that if Line 4 of the UPDATE is executed, then S t (X k,j k ) = S t−1 (X k,j k ). Therefore, for those X k,j k such that S t (X k,j k ) = S t−1 (X k,j k ), we must have that Lines 4 and 5 of the UPDATE are not executed, which implies By definition, Ω Z,i (t − 1) ⊆ Ω Z,i (t) for all i ∈ and t ∈ . By the induction assumption, we thus have that for Vector v t (X k,j k ) is thus non-interfering from the perspectives of all d i , i ∈ (S t (X k,j k ) ∪ {k}). Case 2: Consider those X k ′ ,j ′ that are not a target packet. Since those packets do not participate in time t and their S(X k ′ ,j ′ ) and v(X k ′ ,j ′ ) do not change from time (t − 1) to time t. The same arguments of Case 1 hold verbatim for this case. Case 3: Consider those target packets X k,j k such that S t (X k,j k ) = S t−1 (X k,j k ). For those target packets X k,j k with S t (X k,j k ) = S t−1 (X k,j k ), we must have S t (X k,j k ) = (T ∩ S t−1 (X k,j k )) ∪ S rx and v t (X k , j k ) = v tx by Lines 4 and 5 of the UPDATE, respectively. Consider any d i such that i ∈ (S t (X k,j k ) ∪ {k}). We have two subcases: Case 3.1: i ∈ S rx . Since all such d i must explicitly receive the new v t (X k,j k ) = v tx in the end of time t, we must have v t (X k,j k ) ∈ span(v tx ) = span(Z i (t)) ⊆ Ω Z,i (t) ⊆ span(Ω Z,i (t), Ω M,i ). Such v t (X k,j k ) is thus non-interfering from d i 's perspective. where (74) follows from that k ∈ T since X k,j k is a target packet. (75) follows from that (S t−1 (X k,j k ) ∪ {k}) ⊇ T by Line 6 of the main structure of the PE scheme. From (75), the i value in this case must satisfy Also by Line 6 of the main structure of the PE scheme, for all i satisfy (76) we must have i ∈ (T \S rx ) ⊆ T ⊆ (S t−1 (X l,j l )∪ {l}) for all l ∈ T . By induction, the v t−1 (X l,j l ) vectors used to generate the new v tx (totally |T | of them) must all be noninterfering from d i 's perspective. Therefore where the last equality follows from that d i , i ∈ T \S rx , does not receive any packet in time t. Since v tx is a linear combination of v t−1 (X l,j l ) for all l ∈ T , we thus have v t (X k,j k ) = v tx ∈ span(Ω Z,i (t), Ω M,i ). Based on the above reasoning, v t (X k,j k ) is non-interfering for The proof is completed by induction on the time index t. APPENDIX B A PROOF OF LEMMA 3 Proof of Lemma 3: We prove this lemma by induction on time t. In the end of time t = 0, since Lemma 3 is satisfied. Consider the end of time t > 0. By induction, the following event is of close-to-one probability: The following proofs are conditioned on the event that (78) is satisfied. We use T to denote the subset chosen in the beginning of time t and use {X k,j k } to denote the corresponding target packets. Consider the following cases: Case 1: Consider those k ∈ T such that the corresponding target packet X k,j k either has S t (X k,j k ) = S t−1 (X k,j k ) or has k ∈ S t−1 (X k,j k ). 
For the former subcase S t (X k,j k ) = S t−1 (X k,j k ), by Line 4 of the UPDATE, we must have v t (X k,j k ) = v t−1 (X k,j k ). Since X k,j k is the only packet among {X k,j : ∀j ∈ } that participate in time t, for which the corresponding v(X k,j ) coding vector may change, we must have v t (X k,j ) = v t−1 (X k,j ) for all j ∈ . We then have = Ω R,k (t − 1). We note that for the latter subcase k ∈ S t−1 (X k,j k ), we must have T ⊆ (S t−1 (X k,j k ) ∪ {k}) = S t−1 (X k,j k ) by Line 6 of the main PE scheme. Therefore Line 4 of the UPDATE implies that k ∈ S t (X k,j k ) as well. Since the remaining space Ω R,k only counts the vectors v(X k,j ) with k / ∈ S(X k,j ), (79) holds for the latter subcase as well. For both subcases, let w k (t) denote the corresponding coding vector of Z k (t), which may or may not be an erasure. We then have span(Ω Z,k (t), Ω R,k (t)) = span(w k (t), Ω Z,k (t − 1), Ω R,k (t)) = span(w k (t), Ω Z,k (t − 1), Ω R,k (t − 1)) where (80) is obtained by the induction condition (78). Lemma 3 thus holds for the k values satisfying Case 1. Case 2: Consider those d l with l / ∈ T . Since no X l,j packets participate in time t and their S(X l,j ) and v(X l,j ) do not change in time t. The same arguments of Case 1 thus hold verbatim for this case. Case 3: Consider those k ∈ T such that the corresponding target packet X k,j k has S t (X k,j k ) = S t−1 (X k,j k ) and k / ∈ S t−1 (X k,j k ). Define Ω ′ R as Note that the conditions of Case 3 and (81) jointly imply that . We have two subcases Case 3.1: k / ∈ S t (X k,j k ) and Case 3.2: k ∈ S t (X k,j k ). Case 3.1: k / ∈ S t (X k,j k ). By Line 4 of the UPDATE, we have k / ∈ S rx , i.e., d k receives an erasure in time t. Therefore Ω Z,k (t) = Ω Z,k (t − 1). We will first show that span (Ω Z,k (t), Ω R,k (t)) ⊆ span(Ω Z,k (t), Ω M,k ). As a result, we have v t (X k,j k ) = v tx ∈ span(Ω Z,k (t), Ω M,k ) since v tx is a linear combination of all v t−1 (X l,j l ) for all l ∈ T . Therefore, we have Since we condition on the event that (78) holds, we have Joint (83) and (84) show that span (Ω Z,k (t), Ω R,k (t)) ⊆ span(Ω Z,k (t), Ω M,k ). To prove Lemma 3 for Case 3.1, it remains to show that the event span (Ω Z,k (t), Ω R,k (t)) ⊇ span(Ω Z,k (t), Ω M,k ) is of close-to-one probability, conditioning on (78) being true. We consider two subcases: depending on whether the following equation is satisfied. where (86) Recall that v t (X k,j k ) = v tx is a linear combination of v t−1 (X l,j l ) satisfying in (82). By (89) and the assumption that (85) is not satisfied, we thus have that each v t−1 (X l,j l ) can be written as a unique linear combination of αv t−1 (X k,j k ) + w where α is a GF(q) coefficient and w is a vector satisfying w ∈ span (Ω Z,k (t), Ω ′ R ). By the same reasoning, we can where α is a GF(q) coefficient, w is a vector satisfying w ∈ span (Ω Z,k (t), Ω ′ R ), and the values of α and w depend on the random coefficients c l for all l = k. As a result, we have . Since (85) is not satisfied and w ∈ span (Ω Z,k (t), Ω ′ R ), we have if and only if (c k + α) = 0. Since c k is uniformly distributed in GF(q) and the random variables c k and α are independent, the event that (91) is true has the conditional probability q−1 q , conditioning on (78) being true. For sufficiently large q values, the conditional probability approaches one. Case 3.2: k ∈ S t (X k,j k ). Recall that for Case 3, we consider those k such that k / ∈ S t−1 (X k,j k ). 
By Line 4 of the UPDATE, we have k ∈ S rx , i.e., d k receives the transmitted packet perfectly in time t. Therefore, in the end of time t, Ω R,k (t) = Ω ′ R , which was first defined in (81). We consider two subcases: depending on whether the following equation is satisfied. By (95), (96), and the assumption that (92) is not satisfied, each v t−1 (X l,j l ) can thus be written as a unique linear combination of αv t−1 (X k,j k )+w where α is a GF(q) coefficient and w is a vector satisfying w ∈ span (Ω Z,k (t − 1), Ω ′ R ). Since v t (X k,j k ) = v tx is a linear combination of v t−1 (X l,j l ), by the same reasoning, we can rewrite v t (X k,j k ) as v t (X k,j k ) = c k v t−1 (X k,j k ) + ∀l∈T \k c l v t−1 (X l,j l ) = c k v t−1 (X k,j k ) + (αv t−1 (X k,j k ) + w) = (c k + α)v t−1 (X k,j k ) + w. Combining all cases: Let A t denote the event that span(Ω Z,k (t), Ω R,k (t)) = span(Ω Z,k (t), Ω M,k ) and let T denote the target set chosen in time t. Since for Cases 3.1.2 and 3.2.2 the conditional probability of A t given A t−1 is lower bounded by q−1 q and for all other cases the conditional probability is one, the discussion of Cases 1 to 3.2 thus proves the following inequalities: Since for any T ⊆ we must have |T | ≤ K, we then have By concatenating the conditional probabilities, we thus have Prob (span(Ω Z,k (t), Ω R,k (t)) = span(Ω Z,k (t), Ω M,k )) As a result, for any fixed K and n values, we can choose a sufficiently large finite field GF(q) such that (100) approaches one. Lemma 3 thus holds for all k ∈ and t ∈ . APPENDIX C A PROOF OF PROPOSITION 6 Proof of Proposition 6: To prove this proposition, we will show that for any q 0 > K, the source s can always compute the mixing coefficients {c k : ∀k ∈ T } in Line 7 of the PE scheme, such that the key properties in Lemmas 2 and 3 hold with probability one. Then for any PE scheme, we can use the computed mixing coefficients {c k : ∀k ∈ T } instead of the randomly chosen ones, while attaining the same desired throughput performance. We first notice that the proof of Lemma 2 does not involve any probabilistic arguments. Therefore, Lemma 2 holds for any choices of the mixing coefficients with probability one. We use induction to prove that when using carefully computed mixing coefficients {c k : ∀k ∈ T }, Lemma 3 holds with probability one. We use the same notation of S t (X k,j ), v t (X k,j ), Ω R,k (t), Ω Z,k (t), Ω M,k , Ω ′ R as defined in Lemma 3 and its proof. 8 8 We note that Ω ′ R in (81) actually depends on the value of k and the time index (t − 1). Lemma 3 holds with probability one for any finite field GF(q 0 ). Assume that in the end of time (t− 1), Lemma 3 holds with probability one. Suppose T is chosen in the beginning of time t. Define B t as the set of k values satisfying: Note that this B t can be computed in the beginning of time t. Once B t is computed, we would like to choose the mixing coefficients {c l : ∀l ∈ T } such that the following equation is satisfied. Note that for any k ∈ B t , we have v t−1 (X k,j k ) / ∈ span(Ω Z,k (t−1), Ω R ′ ). Therefore if we choose the coefficients {c l : ∀l ∈ T } uniformly randomly, the probability that c k v t−1 (X k,j k ) + ∀l∈T \k c l v t−1 (X l,j l ) ∈ span(Ω Z,k (t − 1), Ω R ′ ) is at most 1 q0 . The probability that there is at least one k ∈ T satisfying (102) has probability at most |Bt| q0 ≤ K q0 . For any q 0 > K, we thus have a non-zero probability ≥ (1 − K q0 ) such that the uniformly random choice of {c l : ∀l ∈ T } will satisfy (101). 
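In other words, the existence argument just given is a union bound over the at most K indices in $B_t$:

\Pr\Bigl(\exists\,k\in B_t:\ c_k v_{t-1}(X_{k,j_k})+\textstyle\sum_{l\in T\setminus k} c_l v_{t-1}(X_{l,j_l}) \in \mathrm{span}\bigl(\Omega_{Z,k}(t-1),\Omega'_R\bigr)\Bigr) \;\le\; \frac{|B_t|}{q_0} \;\le\; \frac{K}{q_0} \;<\; 1 \quad\text{whenever } q_0>K,

so a valid choice of $\{c_l : \forall l \in T\}$ exists; in particular $GF(2^8)$ suffices for any $K \le 255$, matching Remark 1.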
Therefore, there must exist at least one {c l : ∀l ∈ T } satisfying (101). In the beginning of time t, we arbitrarily choose any such mixing coefficients {c l : ∀l ∈ T } that satisfy (101). The remaining task is to show that the above construction of {c k : ∀k ∈ T } guarantees that Lemma 3 holds in the end of time t with probability one, regardless the channel realization of time t. For those k / ∈ T , such k falls into Case 2 of the proof of Lemma 3. Since Case 2 holds with probability one, Lemma 3 is true for those k / ∈ T with probability one. For those k ∈ T and k ∈ S t−1 (X k,j k ), then such k falls into Case 1 of the proof of Lemma 3. Since Case 1 holds with probability one, Lemma 3 is true for those k ∈ T and k ∈ S t−1 (X k,j k ) with probability one. For those k satisfying: k ∈ T , k / ∈ S t−1 (X k,j k ), and v t−1 (X k,j k ) ∈ span(Ω Z,k (t − 1), Ω R ′ ), such k must fall into Case 1, Case 3.1.1, or Case 3.2.1, depending on whether S t (X k,j k ) = S t−1 (X k,j k ) and whether k ∈ S t (X k,j k ), respectively. Since Cases 1, 3.1.1, and 3.2.1 hold with probability one, Lemma 3 is true for those k with probability one. The remaining k's to consider are those k ∈ B t . If the random channel realization leads to S t (X k,j k ) = S t−1 (X k,j k ), then by Case 1 of the proof of Lemma 3, we must have Lemma 3 holds with conditional probability one. If the random channel realization leads to S t (X k,j k ) = S t−1 (X k,j k ) and k / ∈ S t (X k,j k ), then we are in Case 3.1.2. Since for those k ∈ B t we have chosen the mixing coefficients {c l : ∀l ∈ T } satisfying (101), following the same arguments as in (90) we must be able to rewrite v t (X k,j k ) as follows. v t (X k,j k ) = v tx = (c k + α)v t−1 (X k,j k ) + w where (c k + α) is a non-zero GF(q) coefficient, and w is a vector satisfying w ∈ span (Ω Z,k (t − 1), Ω ′ R ) = span (Ω Z,k (t), Ω ′ R ) . Following the same proof of Case 3.1.2 of Lemma 3, we must have Lemma 3 holds with conditional probability one. If the random channel realization leads to S t (X k,j k ) = S t−1 (X k,j k ) and k ∈ S t (X k,j k ), then we are in Case 3.2.2. Since for those k ∈ B t we have chosen the mixing coefficients {c l : ∀l ∈ T } satisfying (101), following the same arguments as in (97) we must be able to rewrite v t (X k,j k ) as follows. v t (X k,j k ) = v tx = (c k + α)v t−1 (X k,j k ) + w ′ where (c k + α) is a non-zero GF(q) coefficient, and w ′ is a vector satisfying w ′ ∈ span (Ω Z,k (t − 1), Ω ′ R ). Following the same proof of Case 3.2.2 of Lemma 3, we must have Lemma 3 holds with conditional probability one. Since regardless of the random channel realization, Lemma 3 holds with probability one, we have thus shown that one can always construct the desired mixing coefficients {c l : ∀l ∈ T } provided the finite field GF(q 0 ) satisfying q 0 > K. By induction on t, the proof is complete. APPENDIX D A KEY LEMMA FOR THE PROOF OF PROPOSITION 5 Consider an arbitrary spatially independent 1-to-K broadcast PEC with marginal success probabilities 0 < p 1 ≤ p 2 ≤ · · · ≤ p K . For any S ⊆ and S = , define We then have the following lemma: Lemma 5: Suppose the 1-to-K broadcast PEC is spatially independent with marginal success probabilities 0 < p 1 ≤ · · · ≤ p K . Consider any one-sidedly fair rate vector (R 1 , · · · , R K ) ∈ Λ osf , and any non-empty subset T ⊆ . For any k 1 , k 2 ∈ T with k 1 < k 2 , we have R k1 · L T \k1 ≥ R k2 · L T \k2 . Proof: Consider K independent geometric random variables X 1 to X K with success probability p 1 to p K . 
That is, the probability mass function F k (t) of any X k satisfies for all strictly positive integer t. For the sake of simplicity, here we omit the discussion of the degenerate case in which p k = 1. We say that the geometric random trial X k is finished at time t if X k = t. Taking the expectation of (106), we then have Solving the simultaneous equations (107), we have for all S ′ ⊆ and S ′ = . Intermediate Step 2: We will show that for any non-empty subset T ⊆ and any k 1 , k 2 ∈ T with k 1 < k 2 , we have For any realization (X 1 , · · · , X K ) = (x 1 , · · · , x K ), we use y \S , w S , and γ S to denote the corresponding values of Y \S , W S , and Γ S according to (103), (104), and (105), respectively. We then have Note that the only difference between E Γ T \k1 and E Γ T \k k is the underlying measures of X k1 and X k2 . Therefore, by the change of measure formula, we have Note that when γ T \k1 > 0, we must have y ( \T )∪{k1} > w T \k1 , which in turn implies that x k1 ≥ x k2 + 1. We then have where the last inequality follows from p k1 ≤ p k2 and x k1 ≥ x k2 + 1. Combining (109), (110), and (111), we thus have which implies (108).
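For reference, the probability mass function and expectation invoked at the start of this argument (the corresponding displays are missing from this excerpt) are presumably the standard geometric ones:

F_k(t) \;=\; \Pr(X_k = t) \;=\; (1-p_k)^{\,t-1}\,p_k \quad\text{for } t\in\{1,2,\dots\}, \qquad \mathbb{E}[X_k] \;=\; \frac{1}{p_k}.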
A national team of researchers has developed a first-of-its-kind, 3D-printed guide that helps regrow both the sensory and motor functions of complex nerves after injury. The groundbreaking research has the potential to help more than 200,000 people annually who experience nerve injuries or disease. Collaborators on the project are from the University of Minnesota, Virginia Tech, University of Maryland, Princeton University, and Johns Hopkins University. Nerve regeneration is a complex process. Because of this complexity, regrowth of nerves after injury or disease is very rare, according to the Mayo Clinic. Nerve damage is often permanent. Advanced 3D printing methods may now be the solution. In a new study, published today in the journal Advanced Functional Materials, researchers used a combination of 3D imaging and 3D printing techniques to create a custom silicone guide implanted with biochemical cues to help nerve regeneration. The guide's effectiveness was tested in the lab using rats. To achieve their results, researchers used a 3D scanner to reverse engineer the structure of a rat's sciatic nerve. They then used a specialized, custom-built 3D printer to print a guide for regeneration. Incorporated into the guide were 3D-printed chemical cues to promote both motor and sensory nerve regeneration. The guide was then implanted into the rat by surgically grafting it to the cut ends of the nerve. Within about 10 to 12 weeks, the rat's ability to walk again was improved. "This represents an important proof of concept of the 3D printing of custom nerve guides for the regeneration of complex nerve injuries," said University of Minnesota mechanical engineering professor Michael McAlpine, the study's lead researcher. "Someday we hope that we could have a 3D scanner and printer right at the hospital to create custom nerve guides right on site to restore nerve function." Scanning and printing takes about an hour, but the body needs several weeks to regrow the nerves. McAlpine said previous studies have shown regrowth of linear nerves, but this is the first time a study has shown the creation of a custom guide for regrowth of a complex nerve like the Y-shaped sciatic nerve that has both sensory and motor branches. "The exciting next step would be to implant these guides in humans rather than rats," McAlpine said. In cases where a nerve is unavailable for scanning, McAlpine said there could someday be a "library" of scanned nerves from other people or cadavers that hospitals could use to create closely matched 3D-printed guides for patients. In addition to McAlpine, major contributors to the research team include Blake N. Johnson, Virginia Tech; Xiaofeng Jia, University of Maryland and Johns Hopkins University; and Karen Z. Lancaster, Esteban Engel, and Lynn W. Enquist, Princeton University. This research was funded by grants from the National Institutes of Health, the Defense Advanced Research Projects Agency, the Maryland Stem Cell Research Fund, and the Grand Challenges Program at Princeton University. To read more about the study entitled "3D Printed Anatomical Nerve Regeneration Pathways," visit the Advanced Functional Materials website.
/** * @author Rob Winch * @since 5.1 */ @SpringBootTest @AutoConfigureWebTestClient @RunWith(SpringJUnit4ClassRunner.class) public class ServerOauth2ResourceApplicationTests { @Autowired private WebTestClient rest; @Test public void getWhenValidTokenThenIsOk() { String token = "eyJhbGciOiJSUzI1NiJ9.eyJzY29wZSI6Im1lc3NhZ2U6cmVhZCIsImV4cCI6MzEwNjMyODEzMSwianRpIjoiOGY5ZjFiYzItOWVlMi00NTJkLThhMGEtODg3YmE4YmViYjYzIn0.CM_KulSsIrNXW1x6NFeN5VwKQiIW-LIAScJzakRFDox8Ql7o4WOb0ubY3CjWYnglwqYzBvH9McCFqVrUtzdfODY5tyEEJSxWndIGExOi2osrwRPsY3AGzNa23GMfC9I03BFP1IFCq4ZfL-L6yVcIjLke-rA40UG-r-oA7r-N_zsLc5poO7Azf29IQgQF0GSRp4AKQprYHF5Q-Nz9XkILMDz9CwPQ9cbdLCC9smvaGmEAjMUr-C1QgM-_ulb42gWtRDLorW_eArg8g-fmIP0_w82eNWCBjLTy-WaDMACnDVrrUVsUMCqx6jS6h8_uejKly2NFuhyueIHZTTySqCZoTA"; this.rest.get().uri("/") .headers(headers -> headers.setBearerAuth(token)) .exchange() .expectStatus().isOk() .expectBody(String.class).isEqualTo("Hello, null!"); } @Test public void getWhenNoTokenThenIsUnauthorized() { this.rest.get().uri("/") .exchange() .expectStatus().isUnauthorized() .expectHeader().valueEquals(HttpHeaders.WWW_AUTHENTICATE, "Bearer"); } @Test public void getWhenNone() { String token = "ew0KICAiYWxnIjogIm5vbmUiLA0KICAidHlwIjogIkpXVCINCn0.ew0KICAic3ViIjogIjEyMzQ1Njc4OTAiLA0KICAibmFtZSI6ICJKb2huIERvZSIsDQogICJpYXQiOiAxNTE2MjM5MDIyDQp9."; this.rest.get().uri("/") .headers(headers -> headers.setBearerAuth(token)) .exchange() .expectStatus().isUnauthorized() .expectHeader().valueEquals(HttpHeaders.WWW_AUTHENTICATE, "Bearer error=\"invalid_token\", error_description=\"Unsupported algorithm of none\", error_uri=\"https://tools.ietf.org/html/rfc6750#section-3.1\""); } @Test public void getWhenInvalidToken() { String token = "a"; this.rest.get().uri("/") .headers(headers -> headers.setBearerAuth(token)) .exchange() .expectStatus().isUnauthorized() .expectHeader().valueEquals(HttpHeaders.WWW_AUTHENTICATE, "Bearer error=\"invalid_token\", error_description=\"An error occurred while attempting to decode the Jwt: Invalid JWT serialization: Missing dot delimiter(s)\", error_uri=\"https://tools.ietf.org/html/rfc6750#section-3.1\""); } }
/**
 * Intended to add a walking event to the user's calendar and notify the user of changes.
 * The current implementation sums the confidence-weighted walking minutes stored in the
 * activity log and refreshes the results when fewer than 12 hours have been accumulated.
 */
public void addWalkingEvent() {
    Log.d(TAG, "Debugger: addWalkingEvent()");
    // Fetch the confidence values of 'Walking' records older than 7 days.
    Cursor c = myDatabase.rawQuery("SELECT confidence FROM RecordMyActivities WHERE"
            + " activity ='Walking' and time < datetime('now', '-7 days')", null);
    c.moveToFirst();
    // Each row represents a 5-minute sample, weighted by its reported confidence value.
    int sumMins = 0;
    final int mins = 5;
    for (int i = 0; i < c.getCount(); i++) {
        sumMins += mins * c.getInt(0);
        c.moveToNext();
    }
    c.close();
    int hours = sumMins / 60;
    if (hours < 12) {
        refreshResults();
    }
}
<filename>PrivateFrameworks/HomeKitBackingStore/HMBMirrorInput.h // // Generated by class-dump 3.5 (64 bit). // // class-dump is Copyright (C) 1997-1998, 2000-2001, 2004-2013 by <NAME>. // #import "HMFObject.h" #import "HMFLogging.h" @class HMBLocalSQLContextInputBlock, HMBLocalZone, HMFActivity, NSString; @interface HMBMirrorInput : HMFObject <HMFLogging> { HMBLocalZone *_localZone; HMBLocalSQLContextInputBlock *_block; HMFActivity *_activity; } + (id)logCategory; @property(readonly, nonatomic) HMFActivity *activity; // @synthesize activity=_activity; @property(readonly, nonatomic) HMBLocalSQLContextInputBlock *block; // @synthesize block=_block; @property(readonly, nonatomic) __weak HMBLocalZone *localZone; // @synthesize localZone=_localZone; - (void).cxx_destruct; - (id)logIdentifier; - (id)abort; - (id)commitWithOptions:(id)arg1 error:(id *)arg2; - (id)removeModelWithExternalID:(id)arg1; - (id)removeModelWithModelID:(id)arg1; - (id)updateModelData:(id)arg1 modelEncoding:(unsigned long long)arg2 externalID:(id)arg3 externalData:(id)arg4; - (id)updateModel:(id)arg1 externalID:(id)arg2 externalData:(id)arg3; - (void)dealloc; - (id)initWithLocalZone:(id)arg1 block:(id)arg2; // Remaining properties @property(readonly, copy) NSString *debugDescription; @property(readonly, copy) NSString *description; @property(readonly) unsigned long long hash; @property(readonly) Class superclass; @end
<filename>application/src/main/java/com/sanctionco/thunder/crypto/BCryptHashService.java package com.sanctionco.thunder.crypto; import at.favre.lib.crypto.bcrypt.BCrypt; /** * Provides the BCrypt implementation for the {@link HashService}. Provides methods to hash and to * verify existing hashes match. * * @see HashService */ public class BCryptHashService extends HashService { BCryptHashService(boolean serverSideHashEnabled, boolean allowCommonMistakes) { super(serverSideHashEnabled, allowCommonMistakes); } @Override boolean isMatchExact(String plaintext, String hashed) { return BCrypt.verifyer().verify(plaintext.getBytes(), hashed.getBytes()).verified; } @Override public String hash(String plaintext) { if (serverSideHashEnabled()) { return BCrypt.withDefaults().hashToString(10, plaintext.toCharArray()); } return plaintext; } }
package com.axzae.homeassistant.fragment.control; import android.annotation.TargetApi; import android.app.AlertDialog; import android.app.Dialog; import android.graphics.drawable.Drawable; import android.os.Build; import android.os.Bundle; import android.support.annotation.NonNull; import android.support.v4.content.res.ResourcesCompat; import android.support.v4.graphics.drawable.DrawableCompat; import android.view.View; import android.widget.TextView; import com.axzae.homeassistant.R; import com.axzae.homeassistant.model.Entity; import com.axzae.homeassistant.model.rest.CallServiceRequest; import com.axzae.homeassistant.util.CommonUtil; /** * Simple fragment with blur effect behind. */ @TargetApi(Build.VERSION_CODES.HONEYCOMB) public class SwitchFragment extends BaseControlFragment implements View.OnClickListener { private TextView mButtonOff; private TextView mButtonOn; public static SwitchFragment newInstance(Entity entity) { SwitchFragment fragment = new SwitchFragment(); Bundle args = new Bundle(); args.putString("entity", CommonUtil.deflate(entity)); fragment.setArguments(args); return fragment; } @NonNull @Override public Dialog onCreateDialog(Bundle savedInstanceState) { AlertDialog.Builder builder = new AlertDialog.Builder(getActivity()); View rootView = getActivity().getLayoutInflater().inflate(R.layout.control_switch, null); builder.setView(rootView); builder.setTitle(mEntity.getFriendlyName()); rootView.findViewById(R.id.button_close).setOnClickListener(this); mButtonOff = rootView.findViewById(R.id.text_off); mButtonOn = rootView.findViewById(R.id.text_on); mButtonOff.setOnClickListener(this); mButtonOn.setOnClickListener(this); refreshUi(); return builder.create(); } private void refreshUi() { if (mEntity.isCurrentStateActive()) { mButtonOn.setTextColor(ResourcesCompat.getColor(getResources(), R.color.primary, null)); mButtonOff.setTextColor(ResourcesCompat.getColor(getResources(), R.color.md_grey_500, null)); } else { mButtonOff.setTextColor(ResourcesCompat.getColor(getResources(), R.color.primary, null)); mButtonOn.setTextColor(ResourcesCompat.getColor(getResources(), R.color.md_grey_500, null)); } } @Override public void onClick(View view) { switch (view.getId()) { case R.id.button_close: dismiss(); break; case R.id.text_off: callService("homeassistant", "turn_off", new CallServiceRequest(mEntity.entityId)); break; case R.id.text_on: callService("homeassistant", "turn_on", new CallServiceRequest(mEntity.entityId)); break; } } @Override public void onChange(Entity entity) { super.onChange(entity); refreshUi(); } }
/** * Server socket that discards incoming messages and collects avg msg/sec statistic. Used in latency experiments. * Usage: java deltix.util.net.TCPNull port */ public class TCPNull { private final ServerSocket serverSocket; private volatile long reads; private volatile long bytes; TCPNull(String host, int port) throws IOException { InetAddress address = (host != null) ? InetAddress.getByName(host) : null; serverSocket = new ServerSocket(port, 50, address); System.out.println("TCP Null is listening on port " + serverSocket.getLocalPort()); System.out.println("Assuming each message is " + MESSAGE_SIZE + " bytes"); } private void setupStatsTimer() { TimerRunner meter = new TimerRunner() { private long lastReads; private long lastBytes; protected void runInternal() throws Exception { long reads = TCPNull.this.reads; // volatile long bytes = TCPNull.this.bytes; // freeze long messages = (bytes - lastBytes) / MESSAGE_SIZE; System.out.println("Reads: " + (reads - lastReads) / STATISTICS_INTERVAL_S + "/sec; Messages: " + messages / STATISTICS_INTERVAL_S + "/sec"); lastReads = reads; lastBytes = bytes; } }; long statisticsIntervalMs = TimeUnit.SECONDS.toMillis(STATISTICS_INTERVAL_S); GlobalTimer.INSTANCE.scheduleAtFixedRate(meter, statisticsIntervalMs, statisticsIntervalMs); } void run() throws IOException { if (STATISTICS_INTERVAL_S > 0) setupStatsTimer(); Socket socket = serverSocket.accept(); configure(socket); print("Accepted new client", socket); final InputStream stream = socket.getInputStream(); new Thread() { @Override public void run() { byte[] buffer = new byte[MESSAGE_RECEIVE_BUFFER_SIZE]; try { while (true) { int bytesRead = stream.read(buffer); if (bytesRead < 0) break; reads++; bytes += bytesRead; } } catch (IOException iox) { System.err.println("Error in receiver thread"); iox.printStackTrace(); } } }.start(); } public static void main(String[] args) throws Exception { if (args.length == 0) { System.out.println("Command line args: <bind-port> <optional-bind-interface>"); return; } int port = Integer.parseInt(args[0]); String host = (args.length > 1) ? args[1] : null; TCPNull nul = new TCPNull(host, port); nul.run(); } }
/** * The type Collection ascii table aware. * * @param <T> the type parameter */ public class CollectionASCIITableAware<T> implements IASCIITableAware { private List<ASCIITableHeader> headers; private List<List<Object>> data; /** * Instantiates a new Collection ascii table aware. * * @param objList the obj list * @param properties the properties */ public CollectionASCIITableAware(final List<T> objList, final String... properties) { this(objList, Arrays.asList(properties), Arrays.asList(properties)); } /** * Instantiates a new Collection ascii table aware. * * @param objList the obj list * @param properties the properties * @param title the title */ public CollectionASCIITableAware(final List<T> objList, final List<String> properties, final List<String> title) { this.headers = null; this.data = null; if (objList != null && !objList.isEmpty() && properties != null && !properties.isEmpty()) { String header = null; this.headers = new ArrayList<ASCIITableHeader>(properties.size()); for (int i = 0; i < properties.size(); ++i) { header = ((i < title.size()) ? title.get(i) : properties.get(i)); this.headers.add(new ASCIITableHeader(String.valueOf(header).toUpperCase())); } this.data = new ArrayList<List<Object>>(); List<Object> rowData = null; final Class<?> dataClazz = objList.get(0).getClass(); final Map<String, Method> propertyMethodMap = new HashMap<String, Method>(); for (int j = 0; j < objList.size(); ++j) { rowData = new ArrayList<Object>(); for (int k = 0; k < properties.size(); ++k) { rowData.add(this.getProperty(propertyMethodMap, dataClazz, objList.get(j), properties.get(k))); } this.data.add(rowData); } } } private Object getProperty(final Map<String, Method> propertyMethodMap, final Class<?> dataClazz, final T obj, final String property) { Object cellValue = null; try { Method method = null; if (propertyMethodMap.containsKey(property)) { method = propertyMethodMap.get(property); } else { String methodName = "get" + this.capitalize(property); method = this.getMethod(dataClazz, methodName); if (method == null) { methodName = "is" + this.capitalize(property); method = this.getMethod(dataClazz, methodName); } if (method != null) { propertyMethodMap.put(property, method); } } cellValue = method.invoke(obj, new Object[0]); } catch (Exception ex) { } return cellValue; } private Method getMethod(final Class<?> dataClazz, final String methodName) { Method method = null; try { method = dataClazz.getMethod(methodName, (Class<?>[]) new Class[0]); } catch (Exception ex) { } return method; } private String capitalize(final String property) { return (property.length() == 0) ? property : (String.valueOf(property.substring(0, 1).toUpperCase()) + property.substring(1).toLowerCase()); } @Override public List<List<Object>> getData() { return this.data; } @Override public List<ASCIITableHeader> getHeaders() { return this.headers; } @Override public String formatData(final ASCIITableHeader header, final int row, final int col, final Object data) { try { final BigDecimal bd = new BigDecimal(data.toString()); return NumberFormat.getInstance().format(bd); } catch (Exception ex) { return null; } } }
Want an urban lifestyle without spending a fortune on rent? These cities — and several other mid-size metropolises — are attracting a growing number of 20-somethings who want the benefits of city living without the high rental prices that come with larger urban oases like Chicago, San Francisco, or New York City. If you're looking for a more affordable version of the Big Apple or the Windy City, why not try these five moderately sized cities? 1. Cambridge, Massachusetts When Niche — which annually publishes Best Cities lists — released its 2015 ranking of the best U.S. cities for young professionals, Cambridge, in the Boston area, held the top spot. Niche said that a high 27.7% percent of the Cambridge population is between the ages of 25 to 34 years old, which makes it easier for Millennials to find other like-minded adults. It also gets an A+ from Niche for the ease of its average commute. The unemployment rate in Cambridge is a mere 4.4%, while the city gets A or A+ ratings for its bar, restaurant, and coffee shop offerings. On the downside, it's not exactly cheap to rent here. Niche reported that the median rent for an apartment in Cambridge was $1,612. 2. Denver, Colorado Colorado's biggest city is attracting a steady stream of Millennials, too. City Observatory says that the percentage of 25 to 34-year-olds with college degrees increased here by 46.6% from 2000 through 2012. In a 2015 report, The Urban Land Institute listed Denver as one of the new top destinations for Millennials. The institute cited numbers from ApartmentList.com saying that the median cost of renting a two-bedroom apartment in Denver stood at $1,380 in 2015. That's not a rock-bottom price. But it's far lower than the average two-bedroom rental price in San Francisco ($4,350) or New York City ($3,260). There's plenty for Millennials to like about Denver. The city's light rail system is a top one, with 48 miles of track. Recently, Denver businesses and cycling fans ran a successful crowdfunding campaign to cover the $155,000 cost of a new protected bike lane that will run along Arapahoe Street downtown. Denver also has the great outdoors. Yes, Denver has all the restaurants, bars, and theaters that you'd expect from a big city. But its location means that Millennials also have easy access to hiking, rock climbing, and skiing. 3. Pittsburgh, Pennsylvania The Urban Land Institute also cited Pittsburgh as a top mid-size city for Millennials. This is because Pittsburgh has steadily transformed from a city focused on steel and manufacturing to one that is becoming a hub for technology, finance, and healthcare jobs. Pittsburgh also has plenty of affordable apartments. Walkable neighborhoods are common, and Millennials who live here can find plenty of boutique shops, gourmet restaurants, and trendy bars. How popular is Pittsburgh today with young adults? The Atlantic recently ran a story with the headline "What Millennials Love About Pittsburgh." 4. San Antonio, Texas Think San Antonio is home to the Alamo, a nice river walk, and little else? Think again. The Texas city is seeing a boom in aerospace, information technology, biosciences, and advanced manufacturing jobs. And these are the jobs that Millennials increasingly want. It's little surprise that last year, Forbes reported that San Antonio saw the largest increase in its Millennial population of any U.S. city from 2010 through 2013. According to Forbes, which relied on data from the U.S. 
Census Bureau, the number of residents from the ages of 20 to 29 increased by 9.2% during this time. As of 2013, San Antonio had 339,540 residents in this age range. You can join these Millennials for a relatively affordable price. Rent Jungle reported that as of August of this year, the average monthly rent within the San Antonio market stood at $1,035 for a two-bedroom unit and $847 for a one-bedroom apartment. 5. Indianapolis, Indiana Indianapolis doesn't get a ton of attention from hip young adults. But if you want to spend a smaller percentage of your monthly income on rent — and you'd like to live in the Midwest — you could do worse than moving to Indianapolis. Apartment site RentLingo reports that the median one-bedroom apartment rent in this city consumes just 11.7% of the average household's income. According to RentLingo, this makes Indianapolis the most affordable mid-size city in the country for renters. Get the Monitor Stories you care about delivered to your inbox. By signing up, you agree to our Privacy Policy Indianapolis isn't just affordable, either. Indiana's capital city boasts several top-ranked attractions that will appeal to Millennials. Lucas Oil Stadium is considered one of the best venues in the National Football League, while the still-newish Bankers Life Fieldhouse, home to the Pacers, ranks as one of the top stadiums in the NBA. For the non-sports fan, Indianapolis offers the Indianapolis Museum of Art, Indiana Repertory Theatre, and the Kurt Vonnegut Memorial Library. This article first appeared at Wise Bread.
<reponame>drinkcoffee/codewitness package tech.pegasys.poc.witnesscodeanalysis.trace.dataset8m; public class TraceTransactionResult { // "result":{"gasUsed":"0x0","output":"0x"} String gasUsed; String output; }
/// Return `true` if any of the entities matches this type condition fn matches(&self, entities: &Vec<Node>) -> bool { use TypeCondition::*; match self { Any => true, On(name) => entities.iter().any(|entity| entity.typename() == name), } }
/** * Open an object that extends Storable and has been saved to the file system. * * @param <T> return type of object to open. * @param type class type of object to open. * @param uuid the ID of the object to open. * @return the opened object if it can be found and is not locked, otherwise null. */ public static <T extends Storable> T open(Class<T> type, String uuid){ try{ if(uuid == null || uuid.isEmpty()){ throw new RuntimeException("Cannot open a storable object with a null or empty UUID!"); } String path = getSaveLocation(type, uuid); File file = new File(path); if(!file.exists()){ throw new RuntimeException("File does not exist! Path: " + path); } if(!file.isFile()){ throw new RuntimeException("File is not a file! Path: " + path); } if(!file.canRead()){ throw new RuntimeException("Cannot read the file! Path: " + path); } try(ObjectInputStream in = new ObjectInputStream(new InflaterInputStream(new FileInputStream(file)))){ return (T) in.readObject(); }catch(Exception e){ LOG.log(Level.SEVERE, "Unable to read object file! Path: " + path, e); return null; } }catch(RuntimeException e){ LOG.log(Level.SEVERE, "Unable to open file!", e); return null; } }
<filename>src/components/mwcpl-dialog/mwcpl-dialog.tsx import { Component, ComponentInterface, h, Prop, Host } from '@stencil/core'; @Component({ tag: 'mwcpl-dialog', styleUrl: 'mwcpl-dialog.scss', shadow: true, }) export class MwcplDialog implements ComponentInterface { /** * The title of the dialog. */ @Prop() heading: string; /** * Controls the visibility of the dialog. */ @Prop({ reflect: true }) open: boolean; /** * Dismisses the dialog by clicking on the backdrop. */ @Prop() dismissable: boolean; render() { if (this.open) { return ( <Host> <div class="dialog"> <div class="backdrop" onClick={() => this.dismiss()}></div> <div class="container"> <h2 class="heading">{this.heading}</h2> <div class="content"> <slot></slot> </div> <div class="actions" onClick={(e) => this.handleClick(e)}> <slot name="secondary-action"></slot> <div class="spacer"></div> <slot name="primary-action"></slot> </div> </div> </div> </Host> ); } } dismiss() { if (this.dismissable) { this.open = false; } } handleClick(event: MouseEvent) { if (event) { const target = event.target as HTMLElement; if (target.getAttribute('closeDialog') == '') { this.open = false; } } } }
package android.support.v4.view; import android.content.Context; import android.os.Parcel; import android.os.Parcelable; import android.support.v4.os.ParcelableCompat; import android.support.v4.os.ParcelableCompatCreatorCallbacks; import android.util.AttributeSet; import android.view.MotionEvent; import android.widget.Toast; import net.i2p.android.router.R; import net.i2p.android.router.util.Util; public class CustomViewPager extends ViewPager { private boolean mEnabled; private int mFixedPage; private int mFixedPageString; public CustomViewPager(Context context, AttributeSet attrs) { super(context, attrs); mEnabled = false; mFixedPage = -1; mFixedPageString = 0; } @Override public boolean onTouchEvent(MotionEvent event) { return mEnabled && mFixedPage < 0 && super.onTouchEvent(event); } @Override public boolean onInterceptTouchEvent(MotionEvent event) { // See Nov. 20, 2013 comment at: // https://github.com/JakeWharton/ViewPagerIndicator/pull/257 // Our ticket #2488 // prevent NPE if fake dragging and touching ViewPager if(isFakeDragging()) return false; return mEnabled && mFixedPage < 0 && super.onInterceptTouchEvent(event); } @Override public void setCurrentItem(int item) { if ((mEnabled && (mFixedPage < 0 || item == mFixedPage)) || (!mEnabled && item == 0)) super.setCurrentItem(item); else if (!mEnabled) Toast.makeText(getContext(), Util.getRouterContext() == null ? R.string.router_not_running : R.string.router_shutting_down, Toast.LENGTH_SHORT).show(); else if (mFixedPageString > 0) Toast.makeText(getContext(), getContext().getString(mFixedPageString), Toast.LENGTH_SHORT).show(); } public void setPagingEnabled(boolean enabled) { mEnabled = enabled; updatePagingState(); } public void setFixedPage(int page, int res) { mFixedPage = page; mFixedPageString = res; updatePagingState(); } public void updatePagingState() { if (mEnabled) { if (mFixedPage >= 0 && getCurrentItem() != mFixedPage) setCurrentItem(mFixedPage); } else if (getCurrentItem() != 0) setCurrentItem(0); } public static class SavedState extends ViewPager.SavedState { boolean enabled; int fixedPage; int fixedPageString; public SavedState(Parcelable superState) { super(superState); } @Override public void writeToParcel(Parcel out, int flags) { super.writeToParcel(out, flags); out.writeInt(enabled ? 
1 : 0); out.writeInt(fixedPage); out.writeInt(fixedPageString); } @Override public String toString() { return "CustomViewPager.SavedState{" + Integer.toHexString(System.identityHashCode(this)) + " enabled=" + enabled + " fixedPage=" + fixedPage + "}"; } public static final Parcelable.Creator<SavedState> CREATOR = ParcelableCompat.newCreator(new ParcelableCompatCreatorCallbacks<SavedState>() { @Override public SavedState createFromParcel(Parcel in, ClassLoader loader) { return new SavedState(in, loader); } @Override public SavedState[] newArray(int size) { return new SavedState[size]; } }); SavedState(Parcel in, ClassLoader loader) { super(in, loader); enabled = in.readInt() != 0; fixedPage = in.readInt(); fixedPageString = in.readInt(); } } @Override public Parcelable onSaveInstanceState() { Parcelable superState = super.onSaveInstanceState(); SavedState ss = new SavedState(superState); ss.enabled = mEnabled; ss.fixedPage = mFixedPage; ss.fixedPageString = mFixedPageString; return ss; } @Override public void onRestoreInstanceState(Parcelable state) { if (!(state instanceof SavedState)) { super.onRestoreInstanceState(state); return; } SavedState ss = (SavedState)state; super.onRestoreInstanceState(ss.getSuperState()); mEnabled = ss.enabled; mFixedPage = ss.fixedPage; mFixedPageString = ss.fixedPageString; } }
// Copyright 2021 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. use {argh::FromArgs, ffx_core::ffx_command}; #[ffx_command()] #[derive(FromArgs, Debug, PartialEq)] /// Interact with the tracing subsystem #[argh(subcommand, name = "trace")] pub struct TraceCommand { #[argh(subcommand)] pub sub_cmd: TraceSubCommand, } #[derive(FromArgs, PartialEq, Debug)] #[argh(subcommand)] pub enum TraceSubCommand { ListProviders(ListProviders), Start(Start), Stop(Stop), // More commands including `record` and `convert` to follow. } #[derive(FromArgs, PartialEq, Debug)] /// List the target's trace providers #[argh(subcommand, name = "list-providers")] pub struct ListProviders {} // Work around argh's handling of Vec. Listing categories as a comma // separated list of values rather than a repeated keyed option // is much more concise when dealing with a large set of categories. pub type TraceCategories = Vec<String>; // This list should be kept in sync with DEFAULT_CATEGORIES in // //src/testing/sl4f/src/tracing/facade.rs as well as the help text below static DEFAULT_CATEGORIES: &str = "app,audio,benchmark,blobfs,gfx,input,kernel:meta,kernel:sched,ledger,magma,minfs,modular,view,flutter,dart,dart:compiler,dart:dart,dart:debugger,dart:embedder,dart:gc,dart:isolate,dart:profiler,dart:vm"; #[derive(FromArgs, PartialEq, Debug)] /// Record a trace #[argh(subcommand, name = "stop")] pub struct Stop { /// name of output trace file. Defaults to trace.fxt. #[argh(option, default = "String::from(\"trace.fxt\")")] pub output: String, } #[derive(FromArgs, PartialEq, Debug)] /// Record a trace #[argh(subcommand, name = "start")] pub struct Start { /// size of per-provider trace buffer in MB. Defaults to 4. #[argh(option, default = "4")] pub buffer_size: u32, /// comma-separated list of categories to enable. Defaults /// to "app,audio,benchmark,blobfs,gfx,input,kernel:meta, /// kernel:sched,ledger,magma,minfs,modular,view,flutter, /// dart,dart:compiler,dart:dart,dart:debugger,dart:embedder, /// dart:gc,dart:isolate,dart:profiler,dart:vm" #[argh( option, default = "parse_categories(DEFAULT_CATEGORIES).unwrap()", from_str_fn(parse_categories) )] pub categories: TraceCategories, /// duration of trace capture in seconds. If not set, interactive /// mode is used. #[argh(option)] pub duration: Option<f64>, /// name of output trace file. Defaults to trace.fxt. #[argh(option, default = "String::from(\"trace.fxt\")")] pub output: String, } fn parse_categories(value: &str) -> Result<TraceCategories, String> { let mut cats = Vec::new(); if value.is_empty() { return Err("no categories specified".to_string()); } for cat in value.split(",") { if cat.is_empty() { return Err("empty category specified".to_string()); } cats.push(String::from(cat)); } Ok(cats) } #[cfg(test)] mod tests { use super::*; #[test] fn test_parse_categories() { assert_eq!(parse_categories(&"a"), Ok(vec!["a".to_string()])); assert_eq!( parse_categories(&"a,b,c:d"), Ok(vec!["a".to_string(), "b".to_string(), "c:d".to_string()]) ); assert_eq!(parse_categories(&""), Err("no categories specified".to_string())); assert_eq!(parse_categories(&"a,,b"), Err("empty category specified".to_string())); } }
def prepare_data(data):
    """Return a validated copy of ``data`` with unbalanced matches removed."""
    data = data.copy()
    data = _drop_unbalanced_matches(data)
    _check_data(data)
    return data