text
stringlengths 2
100k
| meta
dict |
---|---|
// SPDX-License-Identifier: GPL-2.0
#include <api/fd/array.h>
#include <poll.h>
#include "util/debug.h"
#include "tests/tests.h"
/*
 * Prime all entries of 'fda' for the tests: mark the whole allocation as
 * in use, give every entry a distinct (decreasing, non-zero) fake fd and
 * set its revents to the requested value.
 */
static void fdarray__init_revents(struct fdarray *fda, short revents)
{
	int i = 0;

	fda->nr = fda->nr_alloc;

	while (i < fda->nr) {
		/* Fake fds: fda->nr, fda->nr - 1, ..., 1 */
		fda->entries[i].fd = fda->nr - i;
		fda->entries[i].revents = revents;
		i++;
	}
}
/*
 * Dump 'fda' to 'fp' preceded by "\n<prefix>: ", but only in verbose
 * mode.  Returns the total number of characters printed (0 when quiet).
 * The prefix is printed first, then the array, so keep the two calls
 * explicitly sequenced.
 */
static int fdarray__fprintf_prefix(struct fdarray *fda, const char *prefix, FILE *fp)
{
	int n;

	if (verbose <= 0)
		return 0;

	n = fprintf(fp, "\n%s: ", prefix);
	n += fdarray__fprintf(fda, fp);
	return n;
}
/*
 * Check that fdarray__filter() drops precisely the entries whose revents
 * match the filter mask, compacting the survivors to the front of the
 * array in their original order.
 */
int test__fdarray__filter(struct test *test __maybe_unused, int subtest __maybe_unused)
{
	int left, want_fd[2], i, ret = TEST_FAIL;
	struct fdarray *fda = fdarray__new(5, 5);

	if (fda == NULL) {
		pr_debug("\nfdarray__new() failed!");
		goto out;
	}

	/* Nothing has POLLHUP set, so nothing may be filtered out. */
	fdarray__init_revents(fda, POLLIN);
	left = fdarray__filter(fda, POLLHUP, NULL, NULL);
	if (left != fda->nr_alloc) {
		pr_debug("\nfdarray__filter()=%d != %d shouldn't have filtered anything",
			 left, fda->nr_alloc);
		goto out_delete;
	}

	/* Everything has POLLHUP set, so everything must go. */
	fdarray__init_revents(fda, POLLHUP);
	left = fdarray__filter(fda, POLLHUP, NULL, NULL);
	if (left != 0) {
		pr_debug("\nfdarray__filter()=%d != %d, should have filtered all fds",
			 left, fda->nr_alloc);
		goto out_delete;
	}

	/* A single survivor must end up in slot 0. */
	fdarray__init_revents(fda, POLLHUP);
	fda->entries[2].revents = POLLIN;
	want_fd[0] = fda->entries[2].fd;

	pr_debug("\nfiltering all but fda->entries[2]:");
	fdarray__fprintf_prefix(fda, "before", stderr);
	left = fdarray__filter(fda, POLLHUP, NULL, NULL);
	fdarray__fprintf_prefix(fda, " after", stderr);
	if (left != 1) {
		pr_debug("\nfdarray__filter()=%d != 1, should have left just one event", left);
		goto out_delete;
	}

	if (fda->entries[0].fd != want_fd[0]) {
		pr_debug("\nfda->entries[0].fd=%d != %d\n",
			 fda->entries[0].fd, want_fd[0]);
		goto out_delete;
	}

	/* Two survivors must be compacted into slots 0 and 1, in order. */
	fdarray__init_revents(fda, POLLHUP);
	fda->entries[0].revents = POLLIN;
	want_fd[0] = fda->entries[0].fd;
	fda->entries[3].revents = POLLIN;
	want_fd[1] = fda->entries[3].fd;

	pr_debug("\nfiltering all but (fda->entries[0], fda->entries[3]):");
	fdarray__fprintf_prefix(fda, "before", stderr);
	left = fdarray__filter(fda, POLLHUP, NULL, NULL);
	fdarray__fprintf_prefix(fda, " after", stderr);
	if (left != 2) {
		pr_debug("\nfdarray__filter()=%d != 2, should have left just two events",
			 left);
		goto out_delete;
	}

	for (i = 0; i < 2; ++i) {
		if (fda->entries[i].fd != want_fd[i]) {
			pr_debug("\nfda->entries[%d].fd=%d != %d\n", i,
				 fda->entries[i].fd, want_fd[i]);
			goto out_delete;
		}
	}

	pr_debug("\n");
	ret = 0;
out_delete:
	fdarray__delete(fda);
out:
	return ret;
}
/*
 * Check that fdarray__add() appends entries, grows the array on demand
 * (initial capacity is 2, so the 3rd add must reallocate) and preserves
 * previously added (fd, events) pairs across the growth.
 *
 * Fixes two bugs in the FDA_CHECK diagnostics: the fd-mismatch message
 * printed fda->entries[1].fd regardless of _idx, and the events-mismatch
 * message printed the entry's fd instead of its events mask.
 */
int test__fdarray__add(struct test *test __maybe_unused, int subtest __maybe_unused)
{
	int err = TEST_FAIL;
	struct fdarray *fda = fdarray__new(2, 2);

	if (fda == NULL) {
		pr_debug("\nfdarray__new() failed!");
		goto out;
	}

/* Verify that entry _idx carries the expected fd and events mask. */
#define FDA_CHECK(_idx, _fd, _revents)					\
	if (fda->entries[_idx].fd != _fd) {				\
		pr_debug("\n%d: fda->entries[%d](%d) != %d!",		\
			 __LINE__, _idx, fda->entries[_idx].fd, _fd);	\
		goto out_delete;					\
	}								\
	if (fda->entries[_idx].events != (_revents)) {			\
		pr_debug("\n%d: fda->entries[%d].revents(%d) != %d!",	\
			 __LINE__, _idx, fda->entries[_idx].events, _revents); \
		goto out_delete;					\
	}

/* Add (_fd, _revents), then verify fda->nr and the new entry at _idx. */
#define FDA_ADD(_idx, _fd, _revents, _nr)				\
	if (fdarray__add(fda, _fd, _revents) < 0) {			\
		pr_debug("\n%d: fdarray__add(fda, %d, %d) failed!",	\
			 __LINE__, _fd, _revents);			\
		goto out_delete;					\
	}								\
	if (fda->nr != _nr) {						\
		pr_debug("\n%d: fdarray__add(fda, %d, %d)=%d != %d",	\
			 __LINE__, _fd, _revents, fda->nr, _nr);	\
		goto out_delete;					\
	}								\
	FDA_CHECK(_idx, _fd, _revents)

	FDA_ADD(0, 1, POLLIN, 1);
	FDA_ADD(1, 2, POLLERR, 2);

	fdarray__fprintf_prefix(fda, "before growing array", stderr);

	/* Third add exceeds nr_alloc == 2, forcing a grow. */
	FDA_ADD(2, 35, POLLHUP, 3);

	if (fda->entries == NULL) {
		pr_debug("\nfdarray__add(fda, 35, POLLHUP) should have allocated fda->pollfd!");
		goto out_delete;
	}

	fdarray__fprintf_prefix(fda, "after 3rd add", stderr);

	FDA_ADD(3, 88, POLLIN | POLLOUT, 4);

	fdarray__fprintf_prefix(fda, "after 4th add", stderr);

	/* Entries added before the grow must have been preserved. */
	FDA_CHECK(0, 1, POLLIN);
	FDA_CHECK(1, 2, POLLERR);
	FDA_CHECK(2, 35, POLLHUP);
	FDA_CHECK(3, 88, POLLIN | POLLOUT);

#undef FDA_ADD
#undef FDA_CHECK

	pr_debug("\n");
	err = 0;
out_delete:
	fdarray__delete(fda);
out:
	return err;
}
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="utf-8"?>
<Project ToolsVersion="14.0" DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<Import Project="$(MSBuildExtensionsPath)\$(MSBuildToolsVersion)\Microsoft.Common.props" Condition="Exists('$(MSBuildExtensionsPath)\$(MSBuildToolsVersion)\Microsoft.Common.props')" />
<PropertyGroup>
<Configuration Condition=" '$(Configuration)' == '' ">Debug</Configuration>
<Platform Condition=" '$(Platform)' == '' ">AnyCPU</Platform>
<ProjectGuid>{20549E2F-7B55-4F61-A969-02F7C97DDC7C}</ProjectGuid>
<OutputType>Exe</OutputType>
<AppDesignerFolder>Properties</AppDesignerFolder>
<RootNamespace>FillDataSetUsingSqlDataAdapter</RootNamespace>
<AssemblyName>FillDataSetUsingSqlDataAdapter</AssemblyName>
<TargetFrameworkVersion>v4.6</TargetFrameworkVersion>
<FileAlignment>512</FileAlignment>
<AutoGenerateBindingRedirects>true</AutoGenerateBindingRedirects>
</PropertyGroup>
<PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Debug|AnyCPU' ">
<PlatformTarget>AnyCPU</PlatformTarget>
<DebugSymbols>true</DebugSymbols>
<DebugType>full</DebugType>
<Optimize>false</Optimize>
<OutputPath>bin\Debug\</OutputPath>
<DefineConstants>DEBUG;TRACE</DefineConstants>
<ErrorReport>prompt</ErrorReport>
<WarningLevel>4</WarningLevel>
</PropertyGroup>
<PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Release|AnyCPU' ">
<PlatformTarget>AnyCPU</PlatformTarget>
<DebugType>pdbonly</DebugType>
<Optimize>true</Optimize>
<OutputPath>bin\Release\</OutputPath>
<DefineConstants>TRACE</DefineConstants>
<ErrorReport>prompt</ErrorReport>
<WarningLevel>4</WarningLevel>
</PropertyGroup>
<ItemGroup>
<Reference Include="System" />
<Reference Include="System.Core" />
<Reference Include="System.Xml.Linq" />
<Reference Include="System.Data.DataSetExtensions" />
<Reference Include="Microsoft.CSharp" />
<Reference Include="System.Data" />
<Reference Include="System.Net.Http" />
<Reference Include="System.Xml" />
</ItemGroup>
<ItemGroup>
<Compile Include="Program.cs" />
<Compile Include="Properties\AssemblyInfo.cs" />
</ItemGroup>
<ItemGroup>
<None Include="App.config" />
</ItemGroup>
<Import Project="$(MSBuildToolsPath)\Microsoft.CSharp.targets" />
<!-- To modify your build process, add your task inside one of the targets below and uncomment it.
Other similar extension points exist, see Microsoft.Common.targets.
<Target Name="BeforeBuild">
</Target>
<Target Name="AfterBuild">
</Target>
-->
</Project> | {
"pile_set_name": "Github"
} |
<filter name='no-arp-mac-spoofing' chain='arp-mac' priority='-520'>
<rule action='return' direction='out' priority='350' >
<arp match='yes' arpsrcmacaddr='$MAC'/>
</rule>
<!-- drop everything else -->
<rule action='drop' direction='out' priority='1000' />
</filter>
| {
"pile_set_name": "Github"
} |
using System.Reflection;
using System.Runtime.CompilerServices;
//
// General Information about an assembly is controlled through the following
// set of attributes. Change these attribute values to modify the information
// associated with an assembly.
//
[assembly: AssemblyTitle("")]
[assembly: AssemblyDescription("")]
[assembly: AssemblyConfiguration("")]
[assembly: AssemblyCompany("")]
[assembly: AssemblyProduct("")]
[assembly: AssemblyCopyright("")]
[assembly: AssemblyTrademark("")]
[assembly: AssemblyCulture("")]
//
// Version information for an assembly consists of the following four values:
//
// Major Version
// Minor Version
// Build Number
// Revision
//
// You can specify all the values or you can default the Revision and Build Numbers
// by using the '*' as shown below:
// NOTE: "1.0.*" makes the Build/Revision numbers auto-generated at compile time,
// so every build produces a different assembly version.
[assembly: AssemblyVersion("1.0.*")]
//
// In order to sign your assembly you must specify a key to use. Refer to the
// Microsoft .NET Framework documentation for more information on assembly signing.
//
// Use the attributes below to control which key is used for signing.
//
// Notes:
// (*) If no key is specified, the assembly is not signed.
// (*) KeyName refers to a key that has been installed in the Crypto Service
// Provider (CSP) on your machine. KeyFile refers to a file which contains
// a key.
// (*) If the KeyFile and the KeyName values are both specified, the
// following processing occurs:
// (1) If the KeyName can be found in the CSP, that key is used.
// (2) If the KeyName does not exist and the KeyFile does exist, the key
// in the KeyFile is installed into the CSP and used.
// (*) In order to create a KeyFile, you can use the sn.exe (Strong Name) utility.
// When specifying the KeyFile, the location of the KeyFile should be
// relative to the project output directory which is
// %Project Directory%\obj\<configuration>. For example, if your KeyFile is
// located in the project directory, you would specify the AssemblyKeyFile
// attribute as [assembly: AssemblyKeyFile("..\\..\\mykey.snk")]
// (*) Delay Signing is an advanced option - see the Microsoft .NET Framework
// documentation for more information on this.
//
// NOTE(review): AssemblyDelaySign/AssemblyKeyFile/AssemblyKeyName are legacy;
// modern projects configure signing via project properties (AssemblyOriginatorKeyFile).
[assembly: AssemblyDelaySign(false)]
[assembly: AssemblyKeyFile("")]
[assembly: AssemblyKeyName("")]
| {
"pile_set_name": "Github"
} |
import React from 'react';
import PropTypes from 'prop-types';
import { c, msgid } from 'ttag';
import { APPS } from 'proton-shared/lib/constants';
import { AppLink, SimpleDropdown, DropdownMenu } from '../../components';
/**
 * Dropdown summarising the e-mail addresses attached to a custom domain.
 *
 * The toggle shows "<n> addresses" (count only on mobile, see the span's
 * "nomobile" class); the menu lists every address and links to the
 * addresses settings app for management.
 *
 * @param {{ domainAddresses: Array }} props - address objects; each item is
 *   expected to carry at least { ID, Email } (assumed from usage — confirm
 *   against the API response shape).
 */
const DomainAddresses = ({ domainAddresses }) => {
const list = domainAddresses.map(({ ID, Email }) => (
<div key={ID} className="inbl w100 pt0-5 pb0-5 pl1 pr1 ellipsis">
{Email}
</div>
));
const n = list.length;
// Translatable, pluralised "address(es)" label; the leading space separates
// it from the count rendered just before it.
const addressesTxt = ` ${c('Info').ngettext(msgid`address`, `addresses`, n)}`;
const contentDropDown = (
<>
{n}
<span className="nomobile">{addressesTxt}</span>
</>
); // trick for responsive and mobile display
return (
<>
<SimpleDropdown className="pm-button--link" content={contentDropDown}>
<DropdownMenu>{list}</DropdownMenu>
<div className="alignright p1">
<AppLink className="pm-button" to="/addresses" toApp={APPS.PROTONMAIL_SETTINGS}>{c('Link')
.t`Manage`}</AppLink>
</div>
</SimpleDropdown>
</>
);
};
DomainAddresses.propTypes = {
domainAddresses: PropTypes.array.isRequired,
};
export default DomainAddresses;
| {
"pile_set_name": "Github"
} |
//
// WikipediaSearchCell.swift
// RxExample
//
// Created by Krunoslav Zaher on 3/28/15.
// Copyright © 2015 Krunoslav Zaher. All rights reserved.
//
import UIKit
import RxSwift
import RxCocoa
/// Table view cell showing a single Wikipedia search result: title, URL and
/// a collection view of related images, all bound reactively via RxSwift.
public class WikipediaSearchCell: UITableViewCell {
@IBOutlet var titleOutlet: UILabel!
@IBOutlet var URLOutlet: UILabel!
@IBOutlet var imagesOutlet: UICollectionView!
// Subscriptions for the currently displayed view model; replaced wholesale
// whenever `viewModel` is set, which disposes the previous bindings.
var disposeBag: DisposeBag?
let imageService = DefaultImageService.sharedImageService
public override func awakeFromNib() {
super.awakeFromNib()
self.imagesOutlet.register(UINib(nibName: "WikipediaImageCell", bundle: nil), forCellWithReuseIdentifier: "ImageCell")
}
// Setting the view model (re)creates all Rx bindings for this cell:
// title label, URL label and the image collection view's items.
var viewModel: SearchResultViewModel? {
didSet {
let disposeBag = DisposeBag()
guard let viewModel = viewModel else {
return
}
viewModel.title
.map(Optional.init)
.drive(self.titleOutlet.rx.text)
.disposed(by: disposeBag)
self.URLOutlet.text = viewModel.searchResult.URL.absoluteString
let reachabilityService = Dependencies.sharedDependencies.reachabilityService
viewModel.imageURLs
.drive(self.imagesOutlet.rx.items(cellIdentifier: "ImageCell", cellType: CollectionViewImageCell.self)) { [weak self] (_, url, cell) in
cell.downloadableImage = self?.imageService.imageFromURL(url, reachabilityService: reachabilityService) ?? Observable.empty()
#if DEBUG
//cell.installHackBecauseOfAutomationLeaksOnIOS10(firstViewThatDoesntLeak: self!.superview!.superview!)
#endif
}
.disposed(by: disposeBag)
// Publishing the new bag releases the old one, tearing down stale bindings.
self.disposeBag = disposeBag
#if DEBUG
self.installHackBecauseOfAutomationLeaksOnIOS10(firstViewThatDoesntLeak: self.superview!.superview!)
#endif
}
}
// Clear bindings when the cell is recycled so stale data is never shown.
public override func prepareForReuse() {
super.prepareForReuse()
self.viewModel = nil
self.disposeBag = nil
}
deinit {
}
}
// Common shape shared by cells that own a DisposeBag, so the UI-test leak
// workaround below can reuse `prepareForReuse` as its cleanup hook.
private protocol ReusableView: class {
var disposeBag: DisposeBag? { get }
func prepareForReuse()
}
extension WikipediaSearchCell : ReusableView {
}
extension CollectionViewImageCell : ReusableView {
}
private extension ReusableView {
// When the given (non-leaking) view deallocates, force `prepareForReuse`
// so the cell's bindings are torn down even if the cell itself is leaked
// by the iOS 10 automation infrastructure.
// NOTE(review): `self.disposeBag!` force-unwraps — assumes the bag is
// non-nil at install time; confirm against the cell's lifecycle.
func installHackBecauseOfAutomationLeaksOnIOS10(firstViewThatDoesntLeak: UIView) {
if #available(iOS 10.0, *) {
if OSApplication.isInUITest {
// !!! on iOS 10 automation tests leak cells, 🍻 automation team
// !!! fugly workaround
// ... no, I'm not assuming prepareForReuse is always called before init, this is
// just a workaround because that method already has cleanup logic :(
// Remember that leaking UISwitch?
// https://github.com/ReactiveX/RxSwift/issues/842
// Well it just got some new buddies to hang around with
firstViewThatDoesntLeak.rx.deallocated.subscribe(onNext: { [weak self] _ in
self?.prepareForReuse()
})
.disposed(by: self.disposeBag!)
}
}
}
}
| {
"pile_set_name": "Github"
} |
---
layout: base
title: 'Statistics of csubj in UD_Czech-PDT'
udver: '2'
---
## Treebank Statistics: UD_Czech-PDT: Relations: `csubj`
This relation is universal.
There are 1 language-specific subtypes of `csubj`: <tt><a href="cs_pdt-dep-csubj-pass.html">csubj:pass</a></tt>.
5664 nodes (0%) are attached to their parents as `csubj`.
5136 instances of `csubj` (91%) are left-to-right (parent precedes child).
Average distance between parent and child is 4.49046610169492.
The following 30 pairs of parts of speech are connected with `csubj`: <tt><a href="cs_pdt-pos-VERB.html">VERB</a></tt>-<tt><a href="cs_pdt-pos-VERB.html">VERB</a></tt> (2606; 46% instances), <tt><a href="cs_pdt-pos-ADJ.html">ADJ</a></tt>-<tt><a href="cs_pdt-pos-VERB.html">VERB</a></tt> (1982; 35% instances), <tt><a href="cs_pdt-pos-NOUN.html">NOUN</a></tt>-<tt><a href="cs_pdt-pos-VERB.html">VERB</a></tt> (664; 12% instances), <tt><a href="cs_pdt-pos-VERB.html">VERB</a></tt>-<tt><a href="cs_pdt-pos-ADJ.html">ADJ</a></tt> (89; 2% instances), <tt><a href="cs_pdt-pos-ADJ.html">ADJ</a></tt>-<tt><a href="cs_pdt-pos-ADJ.html">ADJ</a></tt> (71; 1% instances), <tt><a href="cs_pdt-pos-ADV.html">ADV</a></tt>-<tt><a href="cs_pdt-pos-VERB.html">VERB</a></tt> (59; 1% instances), <tt><a href="cs_pdt-pos-VERB.html">VERB</a></tt>-<tt><a href="cs_pdt-pos-NOUN.html">NOUN</a></tt> (48; 1% instances), <tt><a href="cs_pdt-pos-NOUN.html">NOUN</a></tt>-<tt><a href="cs_pdt-pos-ADJ.html">ADJ</a></tt> (30; 1% instances), <tt><a href="cs_pdt-pos-NUM.html">NUM</a></tt>-<tt><a href="cs_pdt-pos-VERB.html">VERB</a></tt> (30; 1% instances), <tt><a href="cs_pdt-pos-ADJ.html">ADJ</a></tt>-<tt><a href="cs_pdt-pos-NOUN.html">NOUN</a></tt> (26; 0% instances), <tt><a href="cs_pdt-pos-NOUN.html">NOUN</a></tt>-<tt><a href="cs_pdt-pos-NOUN.html">NOUN</a></tt> (15; 0% instances), <tt><a href="cs_pdt-pos-PRON.html">PRON</a></tt>-<tt><a href="cs_pdt-pos-VERB.html">VERB</a></tt> (10; 0% instances), <tt><a href="cs_pdt-pos-DET.html">DET</a></tt>-<tt><a href="cs_pdt-pos-VERB.html">VERB</a></tt> (8; 0% instances), <tt><a href="cs_pdt-pos-VERB.html">VERB</a></tt>-<tt><a href="cs_pdt-pos-DET.html">DET</a></tt> (4; 0% instances), <tt><a href="cs_pdt-pos-ADJ.html">ADJ</a></tt>-<tt><a href="cs_pdt-pos-ADV.html">ADV</a></tt> (2; 0% instances), <tt><a href="cs_pdt-pos-ADJ.html">ADJ</a></tt>-<tt><a href="cs_pdt-pos-DET.html">DET</a></tt> (2; 0% instances), <tt><a href="cs_pdt-pos-ADV.html">ADV</a></tt>-<tt><a 
href="cs_pdt-pos-ADJ.html">ADJ</a></tt> (2; 0% instances), <tt><a href="cs_pdt-pos-ADV.html">ADV</a></tt>-<tt><a href="cs_pdt-pos-NOUN.html">NOUN</a></tt> (2; 0% instances), <tt><a href="cs_pdt-pos-PRON.html">PRON</a></tt>-<tt><a href="cs_pdt-pos-NOUN.html">NOUN</a></tt> (2; 0% instances), <tt><a href="cs_pdt-pos-VERB.html">VERB</a></tt>-<tt><a href="cs_pdt-pos-ADV.html">ADV</a></tt> (2; 0% instances), <tt><a href="cs_pdt-pos-ADJ.html">ADJ</a></tt>-<tt><a href="cs_pdt-pos-NUM.html">NUM</a></tt> (1; 0% instances), <tt><a href="cs_pdt-pos-ADJ.html">ADJ</a></tt>-<tt><a href="cs_pdt-pos-PROPN.html">PROPN</a></tt> (1; 0% instances), <tt><a href="cs_pdt-pos-ADJ.html">ADJ</a></tt>-<tt><a href="cs_pdt-pos-SYM.html">SYM</a></tt> (1; 0% instances), <tt><a href="cs_pdt-pos-DET.html">DET</a></tt>-<tt><a href="cs_pdt-pos-NOUN.html">NOUN</a></tt> (1; 0% instances), <tt><a href="cs_pdt-pos-NOUN.html">NOUN</a></tt>-<tt><a href="cs_pdt-pos-ADV.html">ADV</a></tt> (1; 0% instances), <tt><a href="cs_pdt-pos-NOUN.html">NOUN</a></tt>-<tt><a href="cs_pdt-pos-DET.html">DET</a></tt> (1; 0% instances), <tt><a href="cs_pdt-pos-PRON.html">PRON</a></tt>-<tt><a href="cs_pdt-pos-ADJ.html">ADJ</a></tt> (1; 0% instances), <tt><a href="cs_pdt-pos-VERB.html">VERB</a></tt>-<tt><a href="cs_pdt-pos-NUM.html">NUM</a></tt> (1; 0% instances), <tt><a href="cs_pdt-pos-VERB.html">VERB</a></tt>-<tt><a href="cs_pdt-pos-PRON.html">PRON</a></tt> (1; 0% instances), <tt><a href="cs_pdt-pos-VERB.html">VERB</a></tt>-<tt><a href="cs_pdt-pos-PROPN.html">PROPN</a></tt> (1; 0% instances).
~~~ conllu
# visual-style 4 bgColor:blue
# visual-style 4 fgColor:white
# visual-style 3 bgColor:blue
# visual-style 3 fgColor:white
# visual-style 3 4 csubj color:blue
1 Přesto přesto ADV Dg-------1A---- Degree=Pos|Polarity=Pos 3 cc 3:cc _
2 však však CCONJ J^------------- _ 3 cc 3:cc _
3 lze lze VERB VB-S---3P-AA--- Mood=Ind|Number=Sing|Person=3|Polarity=Pos|Tense=Pres|VerbForm=Fin|Voice=Act 0 root 0:root _
4 zaznamenat zaznamenat VERB Vf--------A---- Aspect=Perf|Polarity=Pos|VerbForm=Inf 3 csubj 3:csubj _
5 v v ADP RR--6---------- AdpType=Prep|Case=Loc 6 case 6:case LId=v-1
6 podniku podnik NOUN NNIS6-----A---- Animacy=Inan|Case=Loc|Gender=Masc|Number=Sing|Polarity=Pos 3 obl 3:obl:v:loc _
7 citelné citelný ADJ AAFP4----1A---- Case=Acc|Degree=Pos|Gender=Fem|Number=Plur|Polarity=Pos 8 amod 8:amod _
8 změny změna NOUN NNFP4-----A---- Case=Acc|Gender=Fem|Number=Plur|Polarity=Pos 4 obj 4:obj SpaceAfter=No
9 . . PUNCT Z:------------- _ 3 punct 3:punct _
~~~
~~~ conllu
# visual-style 9 bgColor:blue
# visual-style 9 fgColor:white
# visual-style 6 bgColor:blue
# visual-style 6 fgColor:white
# visual-style 6 9 csubj color:blue
1 Změny změna NOUN NNFP1-----A---- Case=Nom|Gender=Fem|Number=Plur|Polarity=Pos 3 nsubj 3:nsubj _
2 jsou být AUX VB-P---3P-AA--- Mood=Ind|Number=Plur|Person=3|Polarity=Pos|Tense=Pres|VerbForm=Fin|Voice=Act 3 cop 3:cop _
3 citelné citelný ADJ AAFP1----1A---- Case=Nom|Degree=Pos|Gender=Fem|Number=Plur|Polarity=Pos 0 root 0:root SpaceAfter=No
4 , , PUNCT Z:------------- _ 6 punct 6:punct _
5 je být AUX VB-S---3P-AA--- Mood=Ind|Number=Sing|Person=3|Polarity=Pos|Tense=Pres|VerbForm=Fin|Voice=Act 6 cop 6:cop _
6 třeba třeba ADJ ACNS------A---- Gender=Neut|Number=Sing|Polarity=Pos|Variant=Short 3 conj 0:root|3:conj LId=třeba-1
7 je on PRON PPXP4--3------- Case=Acc|Number=Plur|Person=3|PronType=Prs 9 obj 9:obj LId=on-1|LGloss=(oni/ono)
8 lépe dobře ADV Dg-------2A---- Degree=Cmp|Polarity=Pos 9 advmod 9:advmod _
9 prezentovat prezentovat VERB Vf--------A---- Polarity=Pos|VerbForm=Inf 6 csubj 6:csubj _
~~~
~~~ conllu
# visual-style 1 bgColor:blue
# visual-style 1 fgColor:white
# visual-style 7 bgColor:blue
# visual-style 7 fgColor:white
# visual-style 7 1 csubj color:blue
1 Stavět stavět VERB Vf--------A---- Aspect=Imp|Polarity=Pos|VerbForm=Inf 7 csubj 7:csubj _
2 vlastní vlastní ADJ AAFS4----1A---- Case=Acc|Degree=Pos|Gender=Fem|Number=Sing|Polarity=Pos 3 amod 3:amod LId=vlastní-1|LGloss=(příslušný_k_něčemu)
3 výtopnu výtopna NOUN NNFS4-----A---- Case=Acc|Gender=Fem|Number=Sing|Polarity=Pos 1 obj 1:obj _
4 je být AUX VB-S---3P-AA--- Mood=Ind|Number=Sing|Person=3|Polarity=Pos|Tense=Pres|VerbForm=Fin|Voice=Act 7 cop 7:cop _
5 prozatím prozatím ADV Db------------- _ 7 advmod 7:advmod _
6 patrně patrně ADV Dg-------1A---- Degree=Pos|Polarity=Pos 7 advmod 7:advmod LDeriv=patrný
7 risk risk NOUN NNIS1-----A---- Animacy=Inan|Case=Nom|Gender=Masc|Number=Sing|Polarity=Pos 0 root 0:root _
~~~
| {
"pile_set_name": "Github"
} |
[id="configuration-{beatname_lc}-options"]
== Configure inputs
++++
<titleabbrev>Inputs</titleabbrev>
++++
TIP: <<{beatname_lc}-modules-overview,{beatname_uc} modules>> provide the
fastest getting started experience for common log formats. See
<<{beatname_lc}-installation-configuration>> to learn how to get started.
To configure {beatname_uc} manually (instead of using
<<{beatname_lc}-modules-overview,modules>>), you specify a list of inputs in the
+{beatname_lc}.inputs+ section of the +{beatname_lc}.yml+. Inputs specify how
{beatname_uc} locates and processes input data.
The list is a http://yaml.org/[YAML] array, so each input begins with
a dash (`-`). You can specify multiple inputs, and you can specify the same
input type more than once. For example:
["source","yaml",subs="attributes"]
----
{beatname_lc}.inputs:
- type: log
paths:
- /var/log/system.log
- /var/log/wifi.log
- type: log
paths:
- "/var/log/apache2/*"
fields:
apache: true
fields_under_root: true
----
For the most basic configuration, define a single input with a single path. For
example:
[source,yaml]
-------------------------------------------------------------------------------------
filebeat.inputs:
- type: log
enabled: true
paths:
- /var/log/*.log
-------------------------------------------------------------------------------------
The input in this example harvests all files in the path `/var/log/*.log`, which
means that {beatname_uc} will harvest all files in the directory `/var/log/`
that end with `.log`. All patterns supported by
https://golang.org/pkg/path/filepath/#Glob[Go Glob] are also supported here.
To fetch all files from a predefined level of subdirectories, use this pattern:
`/var/log/*/*.log`. This fetches all `.log` files from the subfolders of
`/var/log`. It does not fetch log files from the `/var/log` folder itself.
Currently it is not possible to recursively fetch all files in all
subdirectories of a directory.
[float]
[id="{beatname_lc}-input-types"]
=== Input types
You can configure {beatname_uc} to use the following inputs:
* <<{beatname_lc}-input-azure-eventhub>>
* <<{beatname_lc}-input-cloudfoundry>>
* <<{beatname_lc}-input-container>>
* <<{beatname_lc}-input-docker>>
* <<{beatname_lc}-input-google-pubsub>>
* <<{beatname_lc}-input-http_endpoint>>
* <<{beatname_lc}-input-httpjson>>
* <<{beatname_lc}-input-kafka>>
* <<{beatname_lc}-input-log>>
* <<{beatname_lc}-input-mqtt>>
* <<{beatname_lc}-input-netflow>>
* <<{beatname_lc}-input-o365audit>>
* <<{beatname_lc}-input-redis>>
* <<{beatname_lc}-input-s3>>
* <<{beatname_lc}-input-stdin>>
* <<{beatname_lc}-input-syslog>>
* <<{beatname_lc}-input-tcp>>
* <<{beatname_lc}-input-udp>>
* <<{beatname_lc}-input-unix>>
include::multiline.asciidoc[]
include::../../x-pack/filebeat/docs/inputs/input-awscloudwatch.asciidoc[]
include::../../x-pack/filebeat/docs/inputs/input-azure-eventhub.asciidoc[]
include::../../x-pack/filebeat/docs/inputs/input-cloudfoundry.asciidoc[]
include::inputs/input-container.asciidoc[]
include::inputs/input-docker.asciidoc[]
include::../../x-pack/filebeat/docs/inputs/input-google-pubsub.asciidoc[]
include::../../x-pack/filebeat/docs/inputs/input-http-endpoint.asciidoc[]
include::../../x-pack/filebeat/docs/inputs/input-httpjson.asciidoc[]
include::inputs/input-kafka.asciidoc[]
include::inputs/input-log.asciidoc[]
include::inputs/input-mqtt.asciidoc[]
include::../../x-pack/filebeat/docs/inputs/input-netflow.asciidoc[]
include::../../x-pack/filebeat/docs/inputs/input-o365audit.asciidoc[]
include::inputs/input-redis.asciidoc[]
include::../../x-pack/filebeat/docs/inputs/input-aws-s3.asciidoc[]
include::inputs/input-stdin.asciidoc[]
include::inputs/input-syslog.asciidoc[]
include::inputs/input-tcp.asciidoc[]
include::inputs/input-udp.asciidoc[]
include::inputs/input-unix.asciidoc[]
| {
"pile_set_name": "Github"
} |
// Created on: 2011-10-20
// Created by: Sergey ZERCHANINOV
// Copyright (c) 2011-2014 OPEN CASCADE SAS
//
// This file is part of Open CASCADE Technology software library.
//
// This library is free software; you can redistribute it and/or modify it under
// the terms of the GNU Lesser General Public License version 2.1 as published
// by the Free Software Foundation, with special exception defined in the file
// OCCT_LGPL_EXCEPTION.txt. Consult the file LICENSE_LGPL_21.txt included in OCCT
// distribution for complete text of the license and disclaimer of any warranty.
//
// Alternatively, this file may be used under the terms of Open CASCADE
// commercial license or contractual agreement.
#include <OpenGl_GraphicDriver.hxx>
#include <OpenGl_Group.hxx>
#include <OpenGl_PrimitiveArray.hxx>
#include <OpenGl_CView.hxx>
void OpenGl_GraphicDriver::SetDepthTestEnabled( const Graphic3d_CView& ACView, const Standard_Boolean isEnabled ) const
{
const OpenGl_CView *aCView = (const OpenGl_CView *)ACView.ptrView;
if (aCView)
aCView->WS->UseDepthTest() = isEnabled;
}
// Report whether depth testing is enabled for the view's workspace;
// Standard_False when the view has no associated OpenGl_CView.
Standard_Boolean OpenGl_GraphicDriver::IsDepthTestEnabled( const Graphic3d_CView& ACView ) const
{
  const OpenGl_CView *aCView = (const OpenGl_CView *)ACView.ptrView;
  return (aCView != NULL) ? aCView->WS->UseDepthTest() : Standard_False;
}
void OpenGl_GraphicDriver::ReadDepths( const Graphic3d_CView& ACView,
const Standard_Integer x,
const Standard_Integer y,
const Standard_Integer width,
const Standard_Integer height,
const Standard_Address buffer ) const
{
const OpenGl_CView *aCView = (const OpenGl_CView *)ACView.ptrView;
if (aCView)
aCView->WS->ReadDepths(x, y, width, height, (float*) buffer);
}
void OpenGl_GraphicDriver::SetGLLightEnabled( const Graphic3d_CView& ACView, const Standard_Boolean isEnabled ) const
{
const OpenGl_CView *aCView = (const OpenGl_CView *)ACView.ptrView;
if (aCView)
aCView->WS->UseGLLight() = isEnabled;
}
// Report whether GL lighting is enabled for the view's workspace;
// Standard_False when the view has no associated OpenGl_CView.
Standard_Boolean OpenGl_GraphicDriver::IsGLLightEnabled( const Graphic3d_CView& ACView ) const
{
  const OpenGl_CView *aCView = (const OpenGl_CView *)ACView.ptrView;
  return (aCView != NULL) ? aCView->WS->UseGLLight() : Standard_False;
}
| {
"pile_set_name": "Github"
} |
---
enhancements:
- |
The ecs-agent's docker container name can now be set via the ``ecs_agent_container_name``
option or the ``DD_ECS_AGENT_CONTAINER_NAME`` envvar for autodetection.
| {
"pile_set_name": "Github"
} |
{
"images" : [
{
"idiom" : "universal",
"filename" : "lock.png",
"scale" : "1x"
},
{
"idiom" : "universal",
"filename" : "[email protected]",
"scale" : "2x"
},
{
"idiom" : "universal",
"filename" : "[email protected]",
"scale" : "3x"
}
],
"info" : {
"version" : 1,
"author" : "xcode"
}
} | {
"pile_set_name": "Github"
} |
#pragma warning disable 108 // new keyword hiding
#pragma warning disable 114 // new keyword hiding
namespace Windows.Data.Json
{
#if __ANDROID__ || __IOS__ || NET461 || __WASM__ || __SKIA__ || __NETSTD_REFERENCE__ || __MACOS__
[global::Uno.NotImplemented]
#endif
public partial class JsonObject : global::Windows.Data.Json.IJsonValue,global::System.Collections.Generic.IDictionary<string, global::Windows.Data.Json.IJsonValue>,global::System.Collections.Generic.IEnumerable<global::System.Collections.Generic.KeyValuePair<string, global::Windows.Data.Json.IJsonValue>>,global::Windows.Foundation.IStringable
{
#if __ANDROID__ || __IOS__ || NET461 || __WASM__ || __SKIA__ || __NETSTD_REFERENCE__ || __MACOS__
[global::Uno.NotImplemented("__ANDROID__", "__IOS__", "NET461", "__WASM__", "__SKIA__", "__NETSTD_REFERENCE__", "__MACOS__")]
public global::Windows.Data.Json.JsonValueType ValueType
{
get
{
throw new global::System.NotImplementedException("The member JsonValueType JsonObject.ValueType is not implemented in Uno.");
}
}
#endif
#if __ANDROID__ || __IOS__ || NET461 || __WASM__ || __SKIA__ || __NETSTD_REFERENCE__ || __MACOS__
[global::Uno.NotImplemented("__ANDROID__", "__IOS__", "NET461", "__WASM__", "__SKIA__", "__NETSTD_REFERENCE__", "__MACOS__")]
public uint Size
{
get
{
throw new global::System.NotImplementedException("The member uint JsonObject.Size is not implemented in Uno.");
}
}
#endif
#if __ANDROID__ || __IOS__ || NET461 || __WASM__ || __SKIA__ || __NETSTD_REFERENCE__ || __MACOS__
[global::Uno.NotImplemented("__ANDROID__", "__IOS__", "NET461", "__WASM__", "__SKIA__", "__NETSTD_REFERENCE__", "__MACOS__")]
public JsonObject()
{
global::Windows.Foundation.Metadata.ApiInformation.TryRaiseNotImplemented("Windows.Data.Json.JsonObject", "JsonObject.JsonObject()");
}
#endif
// Forced skipping of method Windows.Data.Json.JsonObject.JsonObject()
#if __ANDROID__ || __IOS__ || NET461 || __WASM__ || __SKIA__ || __NETSTD_REFERENCE__ || __MACOS__
[global::Uno.NotImplemented("__ANDROID__", "__IOS__", "NET461", "__WASM__", "__SKIA__", "__NETSTD_REFERENCE__", "__MACOS__")]
public global::Windows.Data.Json.JsonValue GetNamedValue( string name)
{
throw new global::System.NotImplementedException("The member JsonValue JsonObject.GetNamedValue(string name) is not implemented in Uno.");
}
#endif
#if __ANDROID__ || __IOS__ || NET461 || __WASM__ || __SKIA__ || __NETSTD_REFERENCE__ || __MACOS__
[global::Uno.NotImplemented("__ANDROID__", "__IOS__", "NET461", "__WASM__", "__SKIA__", "__NETSTD_REFERENCE__", "__MACOS__")]
public void SetNamedValue( string name, global::Windows.Data.Json.IJsonValue value)
{
global::Windows.Foundation.Metadata.ApiInformation.TryRaiseNotImplemented("Windows.Data.Json.JsonObject", "void JsonObject.SetNamedValue(string name, IJsonValue value)");
}
#endif
#if __ANDROID__ || __IOS__ || NET461 || __WASM__ || __SKIA__ || __NETSTD_REFERENCE__ || __MACOS__
[global::Uno.NotImplemented("__ANDROID__", "__IOS__", "NET461", "__WASM__", "__SKIA__", "__NETSTD_REFERENCE__", "__MACOS__")]
public global::Windows.Data.Json.JsonObject GetNamedObject( string name)
{
throw new global::System.NotImplementedException("The member JsonObject JsonObject.GetNamedObject(string name) is not implemented in Uno.");
}
#endif
#if __ANDROID__ || __IOS__ || NET461 || __WASM__ || __SKIA__ || __NETSTD_REFERENCE__ || __MACOS__
[global::Uno.NotImplemented("__ANDROID__", "__IOS__", "NET461", "__WASM__", "__SKIA__", "__NETSTD_REFERENCE__", "__MACOS__")]
public global::Windows.Data.Json.JsonArray GetNamedArray( string name)
{
throw new global::System.NotImplementedException("The member JsonArray JsonObject.GetNamedArray(string name) is not implemented in Uno.");
}
#endif
#if __ANDROID__ || __IOS__ || NET461 || __WASM__ || __SKIA__ || __NETSTD_REFERENCE__ || __MACOS__
[global::Uno.NotImplemented("__ANDROID__", "__IOS__", "NET461", "__WASM__", "__SKIA__", "__NETSTD_REFERENCE__", "__MACOS__")]
public string GetNamedString( string name)
{
throw new global::System.NotImplementedException("The member string JsonObject.GetNamedString(string name) is not implemented in Uno.");
}
#endif
#if __ANDROID__ || __IOS__ || NET461 || __WASM__ || __SKIA__ || __NETSTD_REFERENCE__ || __MACOS__
[global::Uno.NotImplemented("__ANDROID__", "__IOS__", "NET461", "__WASM__", "__SKIA__", "__NETSTD_REFERENCE__", "__MACOS__")]
public double GetNamedNumber( string name)
{
throw new global::System.NotImplementedException("The member double JsonObject.GetNamedNumber(string name) is not implemented in Uno.");
}
#endif
#if __ANDROID__ || __IOS__ || NET461 || __WASM__ || __SKIA__ || __NETSTD_REFERENCE__ || __MACOS__
[global::Uno.NotImplemented("__ANDROID__", "__IOS__", "NET461", "__WASM__", "__SKIA__", "__NETSTD_REFERENCE__", "__MACOS__")]
public bool GetNamedBoolean( string name)
{
throw new global::System.NotImplementedException("The member bool JsonObject.GetNamedBoolean(string name) is not implemented in Uno.");
}
#endif
// Forced skipping of method Windows.Data.Json.JsonObject.ValueType.get
#if __ANDROID__ || __IOS__ || NET461 || __WASM__ || __SKIA__ || __NETSTD_REFERENCE__ || __MACOS__
/// <summary>Generated stub — Stringify() is not implemented in Uno; always throws NotImplementedException.</summary>
[global::Uno.NotImplemented("__ANDROID__", "__IOS__", "NET461", "__WASM__", "__SKIA__", "__NETSTD_REFERENCE__", "__MACOS__")]
public string Stringify()
{
throw new global::System.NotImplementedException("The member string JsonObject.Stringify() is not implemented in Uno.");
}
#endif
#if __ANDROID__ || __IOS__ || NET461 || __WASM__ || __SKIA__ || __NETSTD_REFERENCE__ || __MACOS__
/// <summary>Generated stub — GetString() (IJsonValue member) is not implemented in Uno; always throws NotImplementedException.</summary>
[global::Uno.NotImplemented("__ANDROID__", "__IOS__", "NET461", "__WASM__", "__SKIA__", "__NETSTD_REFERENCE__", "__MACOS__")]
public string GetString()
{
throw new global::System.NotImplementedException("The member string JsonObject.GetString() is not implemented in Uno.");
}
#endif
#if __ANDROID__ || __IOS__ || NET461 || __WASM__ || __SKIA__ || __NETSTD_REFERENCE__ || __MACOS__
/// <summary>Generated stub — GetNumber() (IJsonValue member) is not implemented in Uno; always throws NotImplementedException.</summary>
[global::Uno.NotImplemented("__ANDROID__", "__IOS__", "NET461", "__WASM__", "__SKIA__", "__NETSTD_REFERENCE__", "__MACOS__")]
public double GetNumber()
{
throw new global::System.NotImplementedException("The member double JsonObject.GetNumber() is not implemented in Uno.");
}
#endif
#if __ANDROID__ || __IOS__ || NET461 || __WASM__ || __SKIA__ || __NETSTD_REFERENCE__ || __MACOS__
/// <summary>Generated stub — GetBoolean() (IJsonValue member) is not implemented in Uno; always throws NotImplementedException.</summary>
[global::Uno.NotImplemented("__ANDROID__", "__IOS__", "NET461", "__WASM__", "__SKIA__", "__NETSTD_REFERENCE__", "__MACOS__")]
public bool GetBoolean()
{
throw new global::System.NotImplementedException("The member bool JsonObject.GetBoolean() is not implemented in Uno.");
}
#endif
#if __ANDROID__ || __IOS__ || NET461 || __WASM__ || __SKIA__ || __NETSTD_REFERENCE__ || __MACOS__
/// <summary>Generated stub — GetArray() (IJsonValue member) is not implemented in Uno; always throws NotImplementedException.</summary>
[global::Uno.NotImplemented("__ANDROID__", "__IOS__", "NET461", "__WASM__", "__SKIA__", "__NETSTD_REFERENCE__", "__MACOS__")]
public global::Windows.Data.Json.JsonArray GetArray()
{
throw new global::System.NotImplementedException("The member JsonArray JsonObject.GetArray() is not implemented in Uno.");
}
#endif
#if __ANDROID__ || __IOS__ || NET461 || __WASM__ || __SKIA__ || __NETSTD_REFERENCE__ || __MACOS__
/// <summary>Generated stub — GetObject() (IJsonValue member) is not implemented in Uno; always throws NotImplementedException.</summary>
[global::Uno.NotImplemented("__ANDROID__", "__IOS__", "NET461", "__WASM__", "__SKIA__", "__NETSTD_REFERENCE__", "__MACOS__")]
public global::Windows.Data.Json.JsonObject GetObject()
{
throw new global::System.NotImplementedException("The member JsonObject JsonObject.GetObject() is not implemented in Uno.");
}
#endif
// Forced skipping of method Windows.Data.Json.JsonObject.Lookup(string)
// Forced skipping of method Windows.Data.Json.JsonObject.Size.get
// Forced skipping of method Windows.Data.Json.JsonObject.HasKey(string)
// Forced skipping of method Windows.Data.Json.JsonObject.GetView()
// Forced skipping of method Windows.Data.Json.JsonObject.Insert(string, Windows.Data.Json.IJsonValue)
// Forced skipping of method Windows.Data.Json.JsonObject.Remove(string)
// Forced skipping of method Windows.Data.Json.JsonObject.Clear()
// Forced skipping of method Windows.Data.Json.JsonObject.First()
#if __ANDROID__ || __IOS__ || NET461 || __WASM__ || __SKIA__ || __NETSTD_REFERENCE__ || __MACOS__
/// <summary>Generated stub — GetNamedValue(string, JsonValue) overload with a default is not implemented in Uno; always throws NotImplementedException.</summary>
[global::Uno.NotImplemented("__ANDROID__", "__IOS__", "NET461", "__WASM__", "__SKIA__", "__NETSTD_REFERENCE__", "__MACOS__")]
public global::Windows.Data.Json.JsonValue GetNamedValue( string name, global::Windows.Data.Json.JsonValue defaultValue)
{
throw new global::System.NotImplementedException("The member JsonValue JsonObject.GetNamedValue(string name, JsonValue defaultValue) is not implemented in Uno.");
}
#endif
#if __ANDROID__ || __IOS__ || NET461 || __WASM__ || __SKIA__ || __NETSTD_REFERENCE__ || __MACOS__
/// <summary>Generated stub — GetNamedObject(string, JsonObject) overload with a default is not implemented in Uno; always throws NotImplementedException.</summary>
[global::Uno.NotImplemented("__ANDROID__", "__IOS__", "NET461", "__WASM__", "__SKIA__", "__NETSTD_REFERENCE__", "__MACOS__")]
public global::Windows.Data.Json.JsonObject GetNamedObject( string name, global::Windows.Data.Json.JsonObject defaultValue)
{
throw new global::System.NotImplementedException("The member JsonObject JsonObject.GetNamedObject(string name, JsonObject defaultValue) is not implemented in Uno.");
}
#endif
#if __ANDROID__ || __IOS__ || NET461 || __WASM__ || __SKIA__ || __NETSTD_REFERENCE__ || __MACOS__
/// <summary>Generated stub — GetNamedString(string, string) overload with a default is not implemented in Uno; always throws NotImplementedException.</summary>
[global::Uno.NotImplemented("__ANDROID__", "__IOS__", "NET461", "__WASM__", "__SKIA__", "__NETSTD_REFERENCE__", "__MACOS__")]
public string GetNamedString( string name, string defaultValue)
{
throw new global::System.NotImplementedException("The member string JsonObject.GetNamedString(string name, string defaultValue) is not implemented in Uno.");
}
#endif
#if __ANDROID__ || __IOS__ || NET461 || __WASM__ || __SKIA__ || __NETSTD_REFERENCE__ || __MACOS__
/// <summary>Generated stub — GetNamedArray(string, JsonArray) overload with a default is not implemented in Uno; always throws NotImplementedException.</summary>
[global::Uno.NotImplemented("__ANDROID__", "__IOS__", "NET461", "__WASM__", "__SKIA__", "__NETSTD_REFERENCE__", "__MACOS__")]
public global::Windows.Data.Json.JsonArray GetNamedArray( string name, global::Windows.Data.Json.JsonArray defaultValue)
{
throw new global::System.NotImplementedException("The member JsonArray JsonObject.GetNamedArray(string name, JsonArray defaultValue) is not implemented in Uno.");
}
#endif
#if __ANDROID__ || __IOS__ || NET461 || __WASM__ || __SKIA__ || __NETSTD_REFERENCE__ || __MACOS__
/// <summary>Generated stub — GetNamedNumber(string, double) overload with a default is not implemented in Uno; always throws NotImplementedException.</summary>
[global::Uno.NotImplemented("__ANDROID__", "__IOS__", "NET461", "__WASM__", "__SKIA__", "__NETSTD_REFERENCE__", "__MACOS__")]
public double GetNamedNumber( string name, double defaultValue)
{
throw new global::System.NotImplementedException("The member double JsonObject.GetNamedNumber(string name, double defaultValue) is not implemented in Uno.");
}
#endif
#if __ANDROID__ || __IOS__ || NET461 || __WASM__ || __SKIA__ || __NETSTD_REFERENCE__ || __MACOS__
/// <summary>Generated stub — GetNamedBoolean(string, bool) overload with a default is not implemented in Uno; always throws NotImplementedException.</summary>
[global::Uno.NotImplemented("__ANDROID__", "__IOS__", "NET461", "__WASM__", "__SKIA__", "__NETSTD_REFERENCE__", "__MACOS__")]
public bool GetNamedBoolean( string name, bool defaultValue)
{
throw new global::System.NotImplementedException("The member bool JsonObject.GetNamedBoolean(string name, bool defaultValue) is not implemented in Uno.");
}
#endif
#if __ANDROID__ || __IOS__ || NET461 || __WASM__ || __SKIA__ || __NETSTD_REFERENCE__ || __MACOS__
/// <summary>Generated stub — overrides object.ToString() but is not implemented in Uno; always throws NotImplementedException.</summary>
[global::Uno.NotImplemented("__ANDROID__", "__IOS__", "NET461", "__WASM__", "__SKIA__", "__NETSTD_REFERENCE__", "__MACOS__")]
public override string ToString()
{
throw new global::System.NotImplementedException("The member string JsonObject.ToString() is not implemented in Uno.");
}
#endif
#if __ANDROID__ || __IOS__ || NET461 || __WASM__ || __SKIA__ || __NETSTD_REFERENCE__ || __MACOS__
/// <summary>Generated stub — static Parse(string) is not implemented in Uno; always throws NotImplementedException.</summary>
[global::Uno.NotImplemented("__ANDROID__", "__IOS__", "NET461", "__WASM__", "__SKIA__", "__NETSTD_REFERENCE__", "__MACOS__")]
public static global::Windows.Data.Json.JsonObject Parse( string input)
{
throw new global::System.NotImplementedException("The member JsonObject JsonObject.Parse(string input) is not implemented in Uno.");
}
#endif
#if __ANDROID__ || __IOS__ || NET461 || __WASM__ || __SKIA__ || __NETSTD_REFERENCE__ || __MACOS__
/// <summary>Generated stub — static TryParse(string, out JsonObject) is not implemented in Uno; always throws NotImplementedException (never assigns <paramref name="result"/>).</summary>
[global::Uno.NotImplemented("__ANDROID__", "__IOS__", "NET461", "__WASM__", "__SKIA__", "__NETSTD_REFERENCE__", "__MACOS__")]
public static bool TryParse( string input, out global::Windows.Data.Json.JsonObject result)
{
throw new global::System.NotImplementedException("The member bool JsonObject.TryParse(string input, out JsonObject result) is not implemented in Uno.");
}
#endif
// Processing: Windows.Data.Json.IJsonValue
// Processing: System.Collections.Generic.IDictionary<string, Windows.Data.Json.IJsonValue>
#if __ANDROID__ || __IOS__ || NET461 || __WASM__ || __SKIA__ || __NETSTD_REFERENCE__ || __MACOS__
// DeclaringType: System.Collections.Generic.IDictionary<string, Windows.Data.Json.IJsonValue>
/// <summary>Generated IDictionary stub — Add(string, IJsonValue) is not supported; always throws NotSupportedException.</summary>
[global::Uno.NotImplemented("__ANDROID__", "__IOS__", "NET461", "__WASM__", "__SKIA__", "__NETSTD_REFERENCE__", "__MACOS__")]
public void Add( string key, global::Windows.Data.Json.IJsonValue value)
{
throw new global::System.NotSupportedException();
}
#endif
#if __ANDROID__ || __IOS__ || NET461 || __WASM__ || __SKIA__ || __NETSTD_REFERENCE__ || __MACOS__
// DeclaringType: System.Collections.Generic.IDictionary<string, Windows.Data.Json.IJsonValue>
/// <summary>Generated IDictionary stub — ContainsKey is not supported; always throws NotSupportedException.</summary>
[global::Uno.NotImplemented("__ANDROID__", "__IOS__", "NET461", "__WASM__", "__SKIA__", "__NETSTD_REFERENCE__", "__MACOS__")]
public bool ContainsKey( string key)
{
throw new global::System.NotSupportedException();
}
#endif
#if __ANDROID__ || __IOS__ || NET461 || __WASM__ || __SKIA__ || __NETSTD_REFERENCE__ || __MACOS__
// DeclaringType: System.Collections.Generic.IDictionary<string, Windows.Data.Json.IJsonValue>
/// <summary>Generated IDictionary stub — Remove(string) is not supported; always throws NotSupportedException.</summary>
[global::Uno.NotImplemented("__ANDROID__", "__IOS__", "NET461", "__WASM__", "__SKIA__", "__NETSTD_REFERENCE__", "__MACOS__")]
public bool Remove( string key)
{
throw new global::System.NotSupportedException();
}
#endif
#if __ANDROID__ || __IOS__ || NET461 || __WASM__ || __SKIA__ || __NETSTD_REFERENCE__ || __MACOS__
// DeclaringType: System.Collections.Generic.IDictionary<string, Windows.Data.Json.IJsonValue>
/// <summary>Generated IDictionary stub — TryGetValue is not supported; always throws NotSupportedException (never assigns <paramref name="value"/>).</summary>
[global::Uno.NotImplemented("__ANDROID__", "__IOS__", "NET461", "__WASM__", "__SKIA__", "__NETSTD_REFERENCE__", "__MACOS__")]
public bool TryGetValue( string key, out global::Windows.Data.Json.IJsonValue value)
{
throw new global::System.NotSupportedException();
}
#endif
#if __ANDROID__ || __IOS__ || NET461 || __WASM__ || __SKIA__ || __NETSTD_REFERENCE__ || __MACOS__
/// <summary>Generated IDictionary indexer stub — both accessors always throw NotSupportedException.</summary>
[global::Uno.NotImplemented("__ANDROID__", "__IOS__", "NET461", "__WASM__", "__SKIA__", "__NETSTD_REFERENCE__", "__MACOS__")]
public global::Windows.Data.Json.IJsonValue this[string key]
{
get
{
throw new global::System.NotSupportedException();
}
set
{
throw new global::System.NotSupportedException();
}
}
#endif
#if __ANDROID__ || __IOS__ || NET461 || __WASM__ || __SKIA__ || __NETSTD_REFERENCE__ || __MACOS__
/// <summary>Generated IDictionary.Keys stub — both accessors always throw NotSupportedException.</summary>
// NOTE(review): IDictionary<,>.Keys has no setter; the generated setter here is extra surface — confirm against the generator.
[global::Uno.NotImplemented("__ANDROID__", "__IOS__", "NET461", "__WASM__", "__SKIA__", "__NETSTD_REFERENCE__", "__MACOS__")]
public global::System.Collections.Generic.ICollection<string> Keys
{
get
{
throw new global::System.NotSupportedException();
}
set
{
throw new global::System.NotSupportedException();
}
}
#endif
#if __ANDROID__ || __IOS__ || NET461 || __WASM__ || __SKIA__ || __NETSTD_REFERENCE__ || __MACOS__
/// <summary>Generated IDictionary.Values stub — both accessors always throw NotSupportedException.</summary>
// NOTE(review): IDictionary<,>.Values has no setter; the generated setter here is extra surface — confirm against the generator.
[global::Uno.NotImplemented("__ANDROID__", "__IOS__", "NET461", "__WASM__", "__SKIA__", "__NETSTD_REFERENCE__", "__MACOS__")]
public global::System.Collections.Generic.ICollection<global::Windows.Data.Json.IJsonValue> Values
{
get
{
throw new global::System.NotSupportedException();
}
set
{
throw new global::System.NotSupportedException();
}
}
#endif
// Processing: System.Collections.Generic.ICollection<System.Collections.Generic.KeyValuePair<string, Windows.Data.Json.IJsonValue>>
#if __ANDROID__ || __IOS__ || NET461 || __WASM__ || __SKIA__ || __NETSTD_REFERENCE__ || __MACOS__
// DeclaringType: System.Collections.Generic.ICollection<System.Collections.Generic.KeyValuePair<string, Windows.Data.Json.IJsonValue>>
/// <summary>Generated ICollection stub — Add(KeyValuePair) is not supported; always throws NotSupportedException.</summary>
[global::Uno.NotImplemented("__ANDROID__", "__IOS__", "NET461", "__WASM__", "__SKIA__", "__NETSTD_REFERENCE__", "__MACOS__")]
public void Add( global::System.Collections.Generic.KeyValuePair<string, global::Windows.Data.Json.IJsonValue> item)
{
throw new global::System.NotSupportedException();
}
#endif
#if __ANDROID__ || __IOS__ || NET461 || __WASM__ || __SKIA__ || __NETSTD_REFERENCE__ || __MACOS__
// DeclaringType: System.Collections.Generic.ICollection<System.Collections.Generic.KeyValuePair<string, Windows.Data.Json.IJsonValue>>
/// <summary>Generated ICollection stub — Clear() is not supported; always throws NotSupportedException.</summary>
[global::Uno.NotImplemented("__ANDROID__", "__IOS__", "NET461", "__WASM__", "__SKIA__", "__NETSTD_REFERENCE__", "__MACOS__")]
public void Clear()
{
throw new global::System.NotSupportedException();
}
#endif
#if __ANDROID__ || __IOS__ || NET461 || __WASM__ || __SKIA__ || __NETSTD_REFERENCE__ || __MACOS__
// DeclaringType: System.Collections.Generic.ICollection<System.Collections.Generic.KeyValuePair<string, Windows.Data.Json.IJsonValue>>
/// <summary>Generated ICollection stub — Contains(KeyValuePair) is not supported; always throws NotSupportedException.</summary>
[global::Uno.NotImplemented("__ANDROID__", "__IOS__", "NET461", "__WASM__", "__SKIA__", "__NETSTD_REFERENCE__", "__MACOS__")]
public bool Contains( global::System.Collections.Generic.KeyValuePair<string, global::Windows.Data.Json.IJsonValue> item)
{
throw new global::System.NotSupportedException();
}
#endif
#if __ANDROID__ || __IOS__ || NET461 || __WASM__ || __SKIA__ || __NETSTD_REFERENCE__ || __MACOS__
// DeclaringType: System.Collections.Generic.ICollection<System.Collections.Generic.KeyValuePair<string, Windows.Data.Json.IJsonValue>>
/// <summary>Generated ICollection stub — CopyTo is not supported; always throws NotSupportedException.</summary>
[global::Uno.NotImplemented("__ANDROID__", "__IOS__", "NET461", "__WASM__", "__SKIA__", "__NETSTD_REFERENCE__", "__MACOS__")]
public void CopyTo( global::System.Collections.Generic.KeyValuePair<string, global::Windows.Data.Json.IJsonValue>[] array, int arrayIndex)
{
throw new global::System.NotSupportedException();
}
#endif
#if __ANDROID__ || __IOS__ || NET461 || __WASM__ || __SKIA__ || __NETSTD_REFERENCE__ || __MACOS__
// DeclaringType: System.Collections.Generic.ICollection<System.Collections.Generic.KeyValuePair<string, Windows.Data.Json.IJsonValue>>
/// <summary>Generated ICollection stub — Remove(KeyValuePair) is not supported; always throws NotSupportedException.</summary>
[global::Uno.NotImplemented("__ANDROID__", "__IOS__", "NET461", "__WASM__", "__SKIA__", "__NETSTD_REFERENCE__", "__MACOS__")]
public bool Remove( global::System.Collections.Generic.KeyValuePair<string, global::Windows.Data.Json.IJsonValue> item)
{
throw new global::System.NotSupportedException();
}
#endif
#if __ANDROID__ || __IOS__ || NET461 || __WASM__ || __SKIA__ || __NETSTD_REFERENCE__ || __MACOS__
/// <summary>Generated ICollection.Count stub — both accessors always throw NotSupportedException.</summary>
// NOTE(review): ICollection<>.Count has no setter; the generated setter here is extra surface — confirm against the generator.
[global::Uno.NotImplemented("__ANDROID__", "__IOS__", "NET461", "__WASM__", "__SKIA__", "__NETSTD_REFERENCE__", "__MACOS__")]
public int Count
{
get
{
throw new global::System.NotSupportedException();
}
set
{
throw new global::System.NotSupportedException();
}
}
#endif
#if __ANDROID__ || __IOS__ || NET461 || __WASM__ || __SKIA__ || __NETSTD_REFERENCE__ || __MACOS__
/// <summary>Generated ICollection.IsReadOnly stub — both accessors always throw NotSupportedException.</summary>
// NOTE(review): ICollection<>.IsReadOnly has no setter; the generated setter here is extra surface — confirm against the generator.
[global::Uno.NotImplemented("__ANDROID__", "__IOS__", "NET461", "__WASM__", "__SKIA__", "__NETSTD_REFERENCE__", "__MACOS__")]
public bool IsReadOnly
{
get
{
throw new global::System.NotSupportedException();
}
set
{
throw new global::System.NotSupportedException();
}
}
#endif
// Processing: System.Collections.Generic.IEnumerable<System.Collections.Generic.KeyValuePair<string, Windows.Data.Json.IJsonValue>>
#if __ANDROID__ || __IOS__ || NET461 || __WASM__ || __SKIA__ || __NETSTD_REFERENCE__ || __MACOS__
// DeclaringType: System.Collections.Generic.IEnumerable<System.Collections.Generic.KeyValuePair<string, Windows.Data.Json.IJsonValue>>
/// <summary>Generated IEnumerable&lt;KeyValuePair&gt; stub — GetEnumerator() is not supported; always throws NotSupportedException.</summary>
[global::Uno.NotImplemented("__ANDROID__", "__IOS__", "NET461", "__WASM__", "__SKIA__", "__NETSTD_REFERENCE__", "__MACOS__")]
public global::System.Collections.Generic.IEnumerator<global::System.Collections.Generic.KeyValuePair<string, global::Windows.Data.Json.IJsonValue>> GetEnumerator()
{
throw new global::System.NotSupportedException();
}
#endif
// Processing: System.Collections.IEnumerable
#if __ANDROID__ || __IOS__ || NET461 || __WASM__ || __SKIA__ || __NETSTD_REFERENCE__ || __MACOS__
// DeclaringType: System.Collections.IEnumerable
/// <summary>Generated explicit non-generic IEnumerable stub — always throws NotSupportedException.</summary>
[global::Uno.NotImplemented("__ANDROID__", "__IOS__", "NET461", "__WASM__", "__SKIA__", "__NETSTD_REFERENCE__", "__MACOS__")]
global::System.Collections.IEnumerator global::System.Collections.IEnumerable.GetEnumerator()
{
throw new global::System.NotSupportedException();
}
#endif
}
}
| {
"pile_set_name": "Github"
} |
/*
QMPlay2 is a video and audio player.
Copyright (C) 2010-2020 Błażej Szczygieł
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <ImgScaler.hpp>
#include <Frame.hpp>
#ifdef USE_VULKAN
# include "../qmvk/Image.hpp"
#endif
extern "C"
{
#include <libswscale/swscale.h>
}
// Construct an idle scaler; create() must be called before any scale() call.
ImgScaler::ImgScaler() :
m_swsCtx(nullptr),
m_srcH(0), m_dstLinesize(0)
{}
// Prepare (or re-use) a libswscale context converting "videoFrame"'s pixel
// format to packed RGB32 at newWdst x newHdst. Passing a negative dimension
// keeps the source frame's width/height. Returns true on success.
bool ImgScaler::create(const Frame &videoFrame, int newWdst, int newHdst)
{
if (newWdst < 0)
newWdst = videoFrame.width();
if (newHdst < 0)
newHdst = videoFrame.height();
m_srcH = videoFrame.height();
// RGB32 output: 4 bytes per pixel, so the destination stride is width * 4.
m_dstLinesize = newWdst << 2;
// sws_getCachedContext() returns the old context unchanged when all
// parameters match, avoiding an expensive re-initialisation per frame.
m_swsCtx = sws_getCachedContext(
m_swsCtx,
videoFrame.width(),
m_srcH,
videoFrame.pixelFormat(),
newWdst,
newHdst,
AV_PIX_FMT_RGB32,
SWS_BILINEAR,
nullptr,
nullptr,
nullptr
);
return (bool)m_swsCtx;
}
// Convert "src" into the RGB32 buffer "dst" (stride m_dstLinesize, set by
// create()). CPU-accessible frames are scaled directly; otherwise, when built
// with Vulkan, the GPU image is copied into a host-visible linear image first.
// Returns false if the frame is neither CPU-accessible nor a Vulkan image, or
// if the Vulkan copy fails.
bool ImgScaler::scale(const Frame &src, void *dst)
{
const int numPlanes = src.numPlanes();
const uint8_t *srcData[3] = {};
auto swsScale = [&](int *srcLinesize) {
sws_scale(m_swsCtx, srcData, srcLinesize, 0, m_srcH, (uint8_t **)&dst, &m_dstLinesize);
};
if (src.hasCPUAccess())
{
// Fast path: plane pointers are directly readable by libswscale.
for (int i = 0; i < numPlanes; ++i)
srcData[i] = src.constData(i);
swsScale(src.linesize());
return true;
}
#ifdef USE_VULKAN
else if (auto vkImage = src.vulkanImage()) try
{
// Stage the GPU image into a host-mappable linear image, then map each
// plane so libswscale can read it like ordinary CPU memory.
auto hostVkImage = QmVk::Image::createLinear(
vkImage->device(),
vk::Extent2D(src.width(), src.height()),
vkImage->format()
);
vkImage->copyTo(hostVkImage);
for (int i = 0; i < numPlanes; ++i)
srcData[i] = hostVkImage->map<uint8_t>(i);
int srcLinesize[3] = {};
for (int i = 0; i < numPlanes; ++i)
srcLinesize[i] = hostVkImage->linesize(i);
swsScale(srcLinesize);
return true;
}
catch (const vk::SystemError &e)
{
// Vulkan staging failed (e.g. out of memory) — report failure below.
Q_UNUSED(e)
}
#endif
return false;
}
// Raw-plane overload: scale already CPU-resident planes "src" (with strides
// "srcLinesize") into the RGB32 buffer "dst". create() must have succeeded.
void ImgScaler::scale(const void *src[], const int srcLinesize[], void *dst)
{
sws_scale(m_swsCtx, (const uint8_t **)src, srcLinesize, 0, m_srcH, (uint8_t **)&dst, &m_dstLinesize);
}
// Release the libswscale context; safe to call repeatedly.
void ImgScaler::destroy()
{
if (m_swsCtx)
{
sws_freeContext(m_swsCtx);
m_swsCtx = nullptr;
}
}
| {
"pile_set_name": "Github"
} |
/*
* linux/drivers/net/wireless/libertas/if_spi.c
*
* Driver for Marvell SPI WLAN cards.
*
* Copyright 2008 Analog Devices Inc.
*
* Authors:
* Andrey Yurovsky <[email protected]>
* Colin McCabe <[email protected]>
*
* Inspired by if_sdio.c, Copyright 2007-2008 Pierre Ossman
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/hardirq.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/spi/libertas_spi.h>
#include <linux/spi/spi.h>
#include "host.h"
#include "decl.h"
#include "defs.h"
#include "dev.h"
#include "if_spi.h"
/*
 * A buffered packet queued for transfer to the card.
 * @list:   links the packet into cmd_packet_list or data_packet_list
 * @blen:   length in bytes of the payload in @buffer
 * @buffer: payload, 4-byte aligned for the SPI layer; allocated with the
 *          struct as a single block (C99 flexible array member instead of
 *          the GNU zero-length-array extension used previously)
 */
struct if_spi_packet {
	struct list_head list;
	u16 blen;
	u8 buffer[] __attribute__((aligned(4)));
};
/* Per-device state for one Marvell SPI WLAN card. */
struct if_spi_card {
struct spi_device *spi;
struct lbs_private *priv;
struct libertas_spi_platform_data *pdata;
/* The card ID and card revision, as reported by the hardware. */
u16 card_id;
u8 card_rev;
/* The last time that we initiated an SPU operation */
unsigned long prev_xfer_time;
int use_dummy_writes;
/* FIFO fill delays read from the Delay Read Register (see spu_init()) */
unsigned long spu_port_delay;
unsigned long spu_reg_delay;
/* Handles all SPI communication (except for FW load) */
struct workqueue_struct *workqueue;
struct work_struct packet_work;
struct work_struct resume_work;
u8 cmd_buffer[IF_SPI_CMD_BUF_SIZE];
/* A buffer of incoming packets from libertas core.
* Since we can't sleep in hw_host_to_card, we have to buffer
* them. */
struct list_head cmd_packet_list;
struct list_head data_packet_list;
/* Protects cmd_packet_list and data_packet_list */
spinlock_t buffer_lock;
/* True is card suspended */
u8 suspended;
};
static void free_if_spi_card(struct if_spi_card *card)
{
struct list_head *cursor, *next;
struct if_spi_packet *packet;
list_for_each_safe(cursor, next, &card->cmd_packet_list) {
packet = container_of(cursor, struct if_spi_packet, list);
list_del(&packet->list);
kfree(packet);
}
list_for_each_safe(cursor, next, &card->data_packet_list) {
packet = container_of(cursor, struct if_spi_packet, list);
list_del(&packet->list);
kfree(packet);
}
kfree(card);
}
/* Hardware card IDs as reported by the device-ID/control register. */
#define MODEL_8385 0x04
#define MODEL_8686 0x0b
#define MODEL_8688 0x10
/*
 * Helper/main firmware image pairs per chip model; alternate filenames for
 * the same model are tried in order. Terminated by a zeroed sentinel entry.
 */
static const struct lbs_fw_table fw_table[] = {
{ MODEL_8385, "libertas/gspi8385_helper.bin", "libertas/gspi8385.bin" },
{ MODEL_8385, "libertas/gspi8385_hlp.bin", "libertas/gspi8385.bin" },
{ MODEL_8686, "libertas/gspi8686_v9_helper.bin", "libertas/gspi8686_v9.bin" },
{ MODEL_8686, "libertas/gspi8686_hlp.bin", "libertas/gspi8686.bin" },
{ MODEL_8688, "libertas/gspi8688_helper.bin", "libertas/gspi8688.bin" },
{ 0, NULL, NULL }
};
MODULE_FIRMWARE("libertas/gspi8385_helper.bin");
MODULE_FIRMWARE("libertas/gspi8385_hlp.bin");
MODULE_FIRMWARE("libertas/gspi8385.bin");
MODULE_FIRMWARE("libertas/gspi8686_v9_helper.bin");
MODULE_FIRMWARE("libertas/gspi8686_v9.bin");
MODULE_FIRMWARE("libertas/gspi8686_hlp.bin");
MODULE_FIRMWARE("libertas/gspi8686.bin");
MODULE_FIRMWARE("libertas/gspi8688_helper.bin");
MODULE_FIRMWARE("libertas/gspi8688.bin");
/*
* SPI Interface Unit Routines
*
* The SPU sits between the host and the WLAN module.
* All communication with the firmware is through SPU transactions.
*
* First we have to put a SPU register name on the bus. Then we can
* either read from or write to that register.
*
*/
/*
 * Begin an SPU transaction: guarantee the minimum idle gap the SPU needs
 * between successive transfers (pairs with spu_transaction_finish()).
 */
static void spu_transaction_init(struct if_spi_card *card)
{
if (!time_after(jiffies, card->prev_xfer_time + 1)) {
/* Unfortunately, the SPU requires a delay between successive
* transactions. If our last transaction was more than a jiffy
* ago, we have obviously already delayed enough.
* If not, we have to busy-wait to be on the safe side. */
ndelay(400);
}
}
/* End an SPU transaction: stamp the time so the next one can space itself. */
static void spu_transaction_finish(struct if_spi_card *card)
{
card->prev_xfer_time = jiffies;
}
/*
 * Write out a byte buffer to an SPI register, using a series of 16-bit
 * transfers: first the register index (with the write-operation bit set),
 * then the payload. @len must be even. Returns 0 or a spi_sync() error.
 *
 * Fixes encoding-corrupted address-of expressions ("&reg_out"/"&reg_trans"
 * had been mangled into a "registered" sign), which broke compilation.
 */
static int spu_write(struct if_spi_card *card, u16 reg, const u8 *buf, int len)
{
	int err = 0;
	__le16 reg_out = cpu_to_le16(reg | IF_SPI_WRITE_OPERATION_MASK);
	struct spi_message m;
	struct spi_transfer reg_trans;
	struct spi_transfer data_trans;

	spi_message_init(&m);
	memset(&reg_trans, 0, sizeof(reg_trans));
	memset(&data_trans, 0, sizeof(data_trans));

	/* You must give an even number of bytes to the SPU, even if it
	 * doesn't care about the last one.  */
	BUG_ON(len & 0x1);

	spu_transaction_init(card);

	/* write SPU register index */
	reg_trans.tx_buf = &reg_out;
	reg_trans.len = sizeof(reg_out);

	data_trans.tx_buf = buf;
	data_trans.len = len;

	spi_message_add_tail(&reg_trans, &m);
	spi_message_add_tail(&data_trans, &m);

	err = spi_sync(card->spi, &m);
	spu_transaction_finish(card);
	return err;
}
/* Write one 16-bit value to SPU register @reg (little-endian on the wire). */
static inline int spu_write_u16(struct if_spi_card *card, u16 reg, u16 val)
{
	__le16 wire_val = cpu_to_le16(val);

	return spu_write(card, reg, (u8 *)&wire_val, sizeof(wire_val));
}
/*
 * Return 1 if @reg is one of the three read/write port registers (which
 * need the longer "port" FIFO delay), 0 for all other registers.
 */
static inline int spu_reg_is_port_reg(u16 reg)
{
	return (reg == IF_SPI_IO_RDWRPORT_REG ||
		reg == IF_SPI_CMD_RDWRPORT_REG ||
		reg == IF_SPI_DATA_RDWRPORT_REG) ? 1 : 0;
}
/*
 * Read @len bytes from SPU register @reg into @buf: clock out the register
 * index (with the read-operation bit set), give the SPU time to fill its
 * FIFO (dummy clocks or a busy-wait, depending on the negotiated bus mode),
 * then clock the data in. @len must be even. Returns 0 or a spi_sync() error.
 *
 * Fixes encoding-corrupted address-of expressions ("&reg_trans"/"&reg_out"
 * had been mangled into a "registered" sign), which broke compilation.
 */
static int spu_read(struct if_spi_card *card, u16 reg, u8 *buf, int len)
{
	unsigned int delay;
	int err = 0;
	__le16 reg_out = cpu_to_le16(reg | IF_SPI_READ_OPERATION_MASK);
	struct spi_message m;
	struct spi_transfer reg_trans;
	struct spi_transfer dummy_trans;
	struct spi_transfer data_trans;

	/*
	 * You must take an even number of bytes from the SPU, even if you
	 * don't care about the last one.
	 */
	BUG_ON(len & 0x1);

	spu_transaction_init(card);

	spi_message_init(&m);
	memset(&reg_trans, 0, sizeof(reg_trans));
	memset(&dummy_trans, 0, sizeof(dummy_trans));
	memset(&data_trans, 0, sizeof(data_trans));

	/* write SPU register index */
	reg_trans.tx_buf = &reg_out;
	reg_trans.len = sizeof(reg_out);
	spi_message_add_tail(&reg_trans, &m);

	delay = spu_reg_is_port_reg(reg) ? card->spu_port_delay :
						card->spu_reg_delay;
	if (card->use_dummy_writes) {
		/* Clock in dummy cycles while the SPU fills the FIFO */
		dummy_trans.len = delay / 8;
		spi_message_add_tail(&dummy_trans, &m);
	} else {
		/* Busy-wait while the SPU fills the FIFO */
		reg_trans.delay_usecs =
			DIV_ROUND_UP((100 + (delay * 10)), 1000);
	}

	/* read in data */
	data_trans.rx_buf = buf;
	data_trans.len = len;
	spi_message_add_tail(&data_trans, &m);

	err = spi_sync(card->spi, &m);
	spu_transaction_finish(card);
	return err;
}
/* Read 16 bits from an SPI register */
/* On success (*val) holds the host-endian value; @buf stays untouched on error. */
static inline int spu_read_u16(struct if_spi_card *card, u16 reg, u16 *val)
{
__le16 buf;
int ret;
ret = spu_read(card, reg, (u8 *)&buf, sizeof(buf));
if (ret == 0)
*val = le16_to_cpup(&buf);
return ret;
}
/*
 * Read 32 bits from an SPI register.
 * The low 16 bits are read first.
 * On success (*val) holds the host-endian value; untouched on error.
 */
static int spu_read_u32(struct if_spi_card *card, u16 reg, u32 *val)
{
__le32 buf;
int err;
err = spu_read(card, reg, (u8 *)&buf, sizeof(buf));
if (!err)
*val = le32_to_cpup(&buf);
return err;
}
/*
 * Keep reading 16 bits from an SPI register until you get the correct result.
 *
 * If mask = 0, the correct result is any non-zero number.
 * If mask != 0, the correct result is any number where
 * number & target_mask == target
 *
 * Returns -ETIMEDOUT if five seconds (5*HZ jiffies) pass without the
 * correct result.
 */
static int spu_wait_for_u16(struct if_spi_card *card, u16 reg,
u16 target_mask, u16 target)
{
int err;
unsigned long timeout = jiffies + 5*HZ;
while (1) {
u16 val;
err = spu_read_u16(card, reg, &val);
if (err)
return err;
if (target_mask) {
if ((val & target_mask) == target)
return 0;
} else {
if (val)
return 0;
}
/* Poll roughly every 100us until the deadline. */
udelay(100);
if (time_after(jiffies, timeout)) {
pr_err("%s: timeout with val=%02x, target_mask=%02x, target=%02x\n",
__func__, val, target_mask, target);
return -ETIMEDOUT;
}
}
}
/*
 * Read 32 bits from an SPI register until you receive a specific value.
 * Returns -ETIMEDOUT if 4 tries pass without success.
 */
static int spu_wait_for_u32(struct if_spi_card *card, u32 reg, u32 target)
{
int err, try;
for (try = 0; try < 4; ++try) {
u32 val = 0;
err = spu_read_u32(card, reg, &val);
if (err)
return err;
if (val == target)
return 0;
/* Wait 100ms between attempts. */
mdelay(100);
}
return -ETIMEDOUT;
}
/*
 * Configure how the card raises interrupts towards the host.
 * @suppress_host_int: nonzero masks all host interrupt sources; zero
 *                     unmasks the standard download/upload/event sources.
 * @auto_int:          nonzero makes transaction completion raise interrupts
 *                     automatically. Returns 0 or the first write error.
 */
static int spu_set_interrupt_mode(struct if_spi_card *card,
int suppress_host_int,
int auto_int)
{
int err = 0;
/*
* We can suppress a host interrupt by clearing the appropriate
* bit in the "host interrupt status mask" register
*/
if (suppress_host_int) {
err = spu_write_u16(card, IF_SPI_HOST_INT_STATUS_MASK_REG, 0);
if (err)
return err;
} else {
err = spu_write_u16(card, IF_SPI_HOST_INT_STATUS_MASK_REG,
IF_SPI_HISM_TX_DOWNLOAD_RDY |
IF_SPI_HISM_RX_UPLOAD_RDY |
IF_SPI_HISM_CMD_DOWNLOAD_RDY |
IF_SPI_HISM_CARDEVENT |
IF_SPI_HISM_CMD_UPLOAD_RDY);
if (err)
return err;
}
/*
* If auto-interrupts are on, the completion of certain transactions
* will trigger an interrupt automatically. If auto-interrupts
* are off, we need to set the "Card Interrupt Cause" register to
* trigger a card interrupt.
*/
if (auto_int) {
err = spu_write_u16(card, IF_SPI_HOST_INT_CTRL_REG,
IF_SPI_HICT_TX_DOWNLOAD_OVER_AUTO |
IF_SPI_HICT_RX_UPLOAD_OVER_AUTO |
IF_SPI_HICT_CMD_DOWNLOAD_OVER_AUTO |
IF_SPI_HICT_CMD_UPLOAD_OVER_AUTO);
if (err)
return err;
} else {
/* NOTE(review): this clears the STATUS_MASK register rather than
* HOST_INT_CTRL — looks asymmetric with the branch above; confirm
* against the SPU register documentation before changing. */
err = spu_write_u16(card, IF_SPI_HOST_INT_STATUS_MASK_REG, 0);
if (err)
return err;
}
return err;
}
/*
 * Read the device-ID/control register and decode it into the card ID
 * (compared against fw_table models) and the silicon revision.
 */
static int spu_get_chip_revision(struct if_spi_card *card,
u16 *card_id, u8 *card_rev)
{
int err = 0;
u32 dev_ctrl;
err = spu_read_u32(card, IF_SPI_DEVICEID_CTRL_REG, &dev_ctrl);
if (err)
return err;
*card_id = IF_SPI_DEVICEID_CTRL_REG_TO_CARD_ID(dev_ctrl);
*card_rev = IF_SPI_DEVICEID_CTRL_REG_TO_CARD_REV(dev_ctrl);
return err;
}
/*
 * Program the SPU bus-mode register and read it back to verify the write
 * landed (only the low 4 mode bits are compared). Returns 0, a transfer
 * error, or -EIO if the read-back does not match.
 */
static int spu_set_bus_mode(struct if_spi_card *card, u16 mode)
{
int err = 0;
u16 rval;
/* set bus mode */
err = spu_write_u16(card, IF_SPI_SPU_BUS_MODE_REG, mode);
if (err)
return err;
/* Check that we were able to read back what we just wrote. */
err = spu_read_u16(card, IF_SPI_SPU_BUS_MODE_REG, &rval);
if (err)
return err;
if ((rval & 0xF) != mode) {
pr_err("Can't read bus mode register\n");
return -EIO;
}
return 0;
}
/*
 * Initialize the SPI interface unit: start in timed-delay mode, read the
 * Delay Read Register to learn the port/register FIFO fill delays, then
 * optionally switch to dummy-clock delay mode. Returns 0 or an error.
 */
static int spu_init(struct if_spi_card *card, int use_dummy_writes)
{
int err = 0;
u32 delay;
/*
* We have to start up in timed delay mode so that we can safely
* read the Delay Read Register.
*/
card->use_dummy_writes = 0;
err = spu_set_bus_mode(card,
IF_SPI_BUS_MODE_SPI_CLOCK_PHASE_RISING |
IF_SPI_BUS_MODE_DELAY_METHOD_TIMED |
IF_SPI_BUS_MODE_16_BIT_ADDRESS_16_BIT_DATA);
if (err)
return err;
/* Conservative defaults, overwritten from the register just below. */
card->spu_port_delay = 1000;
card->spu_reg_delay = 1000;
err = spu_read_u32(card, IF_SPI_DELAY_READ_REG, &delay);
if (err)
return err;
/* Low half = port-register delay, high half = ordinary-register delay. */
card->spu_port_delay = delay & 0x0000ffff;
card->spu_reg_delay = (delay & 0xffff0000) >> 16;
/* If dummy clock delay mode has been requested, switch to it now */
if (use_dummy_writes) {
card->use_dummy_writes = 1;
err = spu_set_bus_mode(card,
IF_SPI_BUS_MODE_SPI_CLOCK_PHASE_RISING |
IF_SPI_BUS_MODE_DELAY_METHOD_DUMMY_CLOCK |
IF_SPI_BUS_MODE_16_BIT_ADDRESS_16_BIT_DATA);
if (err)
return err;
}
lbs_deb_spi("Initialized SPU unit. "
"spu_port_delay=0x%04lx, spu_reg_delay=0x%04lx\n",
card->spu_port_delay, card->spu_reg_delay);
return err;
}
/*
 * Firmware Loading
 */
/*
 * Download the helper (bootstrap) firmware image to the card in fixed-size
 * chunks: announce the chunk size in scratch pad 1, wait for the boot code
 * to signal readiness, write the chunk to the command port, then interrupt
 * the boot code. A zero write to scratch pad 1 terminates the download.
 */
static int if_spi_prog_helper_firmware(struct if_spi_card *card,
const struct firmware *firmware)
{
int err = 0;
int bytes_remaining;
const u8 *fw;
u8 temp[HELPER_FW_LOAD_CHUNK_SZ];
lbs_deb_enter(LBS_DEB_SPI);
/* Mask host interrupts; the download is driven by polling. */
err = spu_set_interrupt_mode(card, 1, 0);
if (err)
goto out;
bytes_remaining = firmware->size;
fw = firmware->data;
/* Load helper firmware image */
while (bytes_remaining > 0) {
/*
* Scratch pad 1 should contain the number of bytes we
* want to download to the firmware
*/
err = spu_write_u16(card, IF_SPI_SCRATCH_1_REG,
HELPER_FW_LOAD_CHUNK_SZ);
if (err)
goto out;
err = spu_wait_for_u16(card, IF_SPI_HOST_INT_STATUS_REG,
IF_SPI_HIST_CMD_DOWNLOAD_RDY,
IF_SPI_HIST_CMD_DOWNLOAD_RDY);
if (err)
goto out;
/*
* Feed the data into the command read/write port reg
* in chunks of 64 bytes
*/
/* Final chunk is zero-padded up to the fixed chunk size. */
memset(temp, 0, sizeof(temp));
memcpy(temp, fw,
min(bytes_remaining, HELPER_FW_LOAD_CHUNK_SZ));
mdelay(10);
err = spu_write(card, IF_SPI_CMD_RDWRPORT_REG,
temp, HELPER_FW_LOAD_CHUNK_SZ);
if (err)
goto out;
/* Interrupt the boot code */
err = spu_write_u16(card, IF_SPI_HOST_INT_STATUS_REG, 0);
if (err)
goto out;
err = spu_write_u16(card, IF_SPI_CARD_INT_CAUSE_REG,
IF_SPI_CIC_CMD_DOWNLOAD_OVER);
if (err)
goto out;
bytes_remaining -= HELPER_FW_LOAD_CHUNK_SZ;
fw += HELPER_FW_LOAD_CHUNK_SZ;
}
/*
* Once the helper / single stage firmware download is complete,
* write 0 to scratch pad 1 and interrupt the
* bootloader. This completes the helper download.
*/
err = spu_write_u16(card, IF_SPI_SCRATCH_1_REG, FIRMWARE_DNLD_OK);
if (err)
goto out;
err = spu_write_u16(card, IF_SPI_HOST_INT_STATUS_REG, 0);
if (err)
goto out;
err = spu_write_u16(card, IF_SPI_CARD_INT_CAUSE_REG,
IF_SPI_CIC_CMD_DOWNLOAD_OVER);
out:
if (err)
pr_err("failed to load helper firmware (err=%d)\n", err);
lbs_deb_leave_args(LBS_DEB_SPI, "err %d", err);
return err;
}
/*
 * Returns the length of the next packet the firmware expects us to send.
 * Sets crc_err if the previous transfer had a CRC error.
 * Returns a negative error code on transfer failure, or -EIO if the
 * requested length exceeds our command buffer.
 */
static int if_spi_prog_main_firmware_check_len(struct if_spi_card *card,
int *crc_err)
{
u16 len;
int err = 0;
/*
* wait until the host interrupt status register indicates
* that we are ready to download
*/
err = spu_wait_for_u16(card, IF_SPI_HOST_INT_STATUS_REG,
IF_SPI_HIST_CMD_DOWNLOAD_RDY,
IF_SPI_HIST_CMD_DOWNLOAD_RDY);
if (err) {
pr_err("timed out waiting for host_int_status\n");
return err;
}
/* Ask the device how many bytes of firmware it wants. */
err = spu_read_u16(card, IF_SPI_SCRATCH_1_REG, &len);
if (err)
return err;
if (len > IF_SPI_CMD_BUF_SIZE) {
pr_err("firmware load device requested a larger transfer than we are prepared to handle (len = %d)\n",
len);
return -EIO;
}
/* An odd length is the firmware's way of flagging a CRC error. */
if (len & 0x1) {
lbs_deb_spi("%s: crc error\n", __func__);
len &= ~0x1;
*crc_err = 1;
} else
*crc_err = 0;
return len;
}
/*
 * Download the main firmware image. The firmware drives the transfer: it
 * announces how many bytes it wants next (via scratch pad 1); a CRC error
 * on the previous chunk makes us resend it, up to MAX_MAIN_FW_LOAD_CRC_ERR
 * times. Completion is confirmed via a magic value in scratch pad 4.
 */
static int if_spi_prog_main_firmware(struct if_spi_card *card,
const struct firmware *firmware)
{
struct lbs_private *priv = card->priv;
int len, prev_len;
int bytes, crc_err = 0, err = 0;
const u8 *fw;
u16 num_crc_errs;
lbs_deb_enter(LBS_DEB_SPI);
/* Mask host interrupts; the download is driven by polling. */
err = spu_set_interrupt_mode(card, 1, 0);
if (err)
goto out;
err = spu_wait_for_u16(card, IF_SPI_SCRATCH_1_REG, 0, 0);
if (err) {
netdev_err(priv->dev,
"%s: timed out waiting for initial scratch reg = 0\n",
__func__);
goto out;
}
num_crc_errs = 0;
prev_len = 0;
bytes = firmware->size;
fw = firmware->data;
/* len == 0 from the check routine means the firmware is satisfied. */
while ((len = if_spi_prog_main_firmware_check_len(card, &crc_err))) {
if (len < 0) {
err = len;
goto out;
}
if (bytes < 0) {
/*
* If there are no more bytes left, we would normally
* expect to have terminated with len = 0
*/
netdev_err(priv->dev,
"Firmware load wants more bytes than we have to offer.\n");
break;
}
if (crc_err) {
/* Previous transfer failed. */
if (++num_crc_errs > MAX_MAIN_FW_LOAD_CRC_ERR) {
pr_err("Too many CRC errors encountered in firmware load.\n");
err = -EIO;
goto out;
}
} else {
/* Previous transfer succeeded. Advance counters. */
bytes -= prev_len;
fw += prev_len;
}
/* Last chunk: zero-pad the tail of the command buffer. */
if (bytes < len) {
memset(card->cmd_buffer, 0, len);
memcpy(card->cmd_buffer, fw, bytes);
} else
memcpy(card->cmd_buffer, fw, len);
err = spu_write_u16(card, IF_SPI_HOST_INT_STATUS_REG, 0);
if (err)
goto out;
err = spu_write(card, IF_SPI_CMD_RDWRPORT_REG,
card->cmd_buffer, len);
if (err)
goto out;
err = spu_write_u16(card, IF_SPI_CARD_INT_CAUSE_REG ,
IF_SPI_CIC_CMD_DOWNLOAD_OVER);
if (err)
goto out;
prev_len = len;
}
if (bytes > prev_len) {
pr_err("firmware load wants fewer bytes than we have to offer\n");
}
/* Confirm firmware download */
err = spu_wait_for_u32(card, IF_SPI_SCRATCH_4_REG,
SUCCESSFUL_FW_DOWNLOAD_MAGIC);
if (err) {
pr_err("failed to confirm the firmware download\n");
goto out;
}
out:
if (err)
pr_err("failed to load firmware (err=%d)\n", err);
lbs_deb_leave_args(LBS_DEB_SPI, "err %d", err);
return err;
}
/*
* SPI Transfer Thread
*
* The SPI worker handles all SPI transfers, so there is no need for a lock.
*/
/*
 * Move a command response from the card to the host.
 *
 * Reads the response length from scratch register 2, pulls the payload
 * through the command read/write port into card->cmd_buffer, then copies
 * it into the response slot not currently in use and notifies libertas.
 * Returns 0 on success or a negative error code.
 */
static int if_spi_c2h_cmd(struct if_spi_card *card)
{
	struct lbs_private *priv = card->priv;
	unsigned long flags;
	int err = 0;
	u16 len;
	u8 i;
	/*
	 * We need a buffer big enough to handle whatever people send to
	 * hw_host_to_card
	 */
	BUILD_BUG_ON(IF_SPI_CMD_BUF_SIZE < LBS_CMD_BUFFER_SIZE);
	BUILD_BUG_ON(IF_SPI_CMD_BUF_SIZE < LBS_UPLD_SIZE);
	/*
	 * It's just annoying if the buffer size isn't a multiple of 4, because
	 * then we might have len < IF_SPI_CMD_BUF_SIZE but
	 * ALIGN(len, 4) > IF_SPI_CMD_BUF_SIZE
	 */
	BUILD_BUG_ON(IF_SPI_CMD_BUF_SIZE % 4 != 0);
	lbs_deb_enter(LBS_DEB_SPI);
	/* How many bytes are there to read? */
	err = spu_read_u16(card, IF_SPI_SCRATCH_2_REG, &len);
	if (err)
		goto out;
	if (!len) {
		netdev_err(priv->dev, "%s: error: card has no data for host\n",
			__func__);
		err = -EINVAL;
		goto out;
	} else if (len > IF_SPI_CMD_BUF_SIZE) {
		netdev_err(priv->dev,
			"%s: error: response packet too large: %d bytes, but maximum is %d\n",
			__func__, len, IF_SPI_CMD_BUF_SIZE);
		err = -EINVAL;
		goto out;
	}
	/* Read the data from the WLAN module into our command buffer;
	 * the transfer is rounded up to a 4-byte boundary. */
	err = spu_read(card, IF_SPI_CMD_RDWRPORT_REG,
			card->cmd_buffer, ALIGN(len, 4));
	if (err)
		goto out;
	spin_lock_irqsave(&priv->driver_lock, flags);
	/* Pick the slot other than resp_idx; it must be empty. */
	i = (priv->resp_idx == 0) ? 1 : 0;
	BUG_ON(priv->resp_len[i]);
	priv->resp_len[i] = len;
	memcpy(priv->resp_buf[i], card->cmd_buffer, len);
	lbs_notify_command_response(priv, i);
	spin_unlock_irqrestore(&priv->driver_lock, flags);
out:
	if (err)
		netdev_err(priv->dev, "%s: err=%d\n", __func__, err);
	lbs_deb_leave(LBS_DEB_SPI);
	return err;
}
/*
 * Move a received data packet from the card to the host.
 *
 * Reads the packet length from scratch register 1, allocates an skb,
 * pulls the payload through the data read/write port and hands the skb
 * to libertas for RX processing. Returns 0 on success or a negative
 * error code; the skb is freed here only on failure.
 */
static int if_spi_c2h_data(struct if_spi_card *card)
{
	struct lbs_private *priv = card->priv;
	struct sk_buff *skb;
	char *data;
	u16 len;
	int err = 0;
	lbs_deb_enter(LBS_DEB_SPI);
	/* How many bytes are there to read? */
	err = spu_read_u16(card, IF_SPI_SCRATCH_1_REG, &len);
	if (err)
		goto out;
	if (!len) {
		netdev_err(priv->dev, "%s: error: card has no data for host\n",
			__func__);
		err = -EINVAL;
		goto out;
	} else if (len > MRVDRV_ETH_RX_PACKET_BUFFER_SIZE) {
		netdev_err(priv->dev,
			"%s: error: card has %d bytes of data, but our maximum skb size is %zu\n",
			__func__, len, MRVDRV_ETH_RX_PACKET_BUFFER_SIZE);
		err = -EINVAL;
		goto out;
	}
	/* TODO: should we allocate a smaller skb if we have less data? */
	skb = dev_alloc_skb(MRVDRV_ETH_RX_PACKET_BUFFER_SIZE);
	if (!skb) {
		err = -ENOBUFS;
		goto out;
	}
	/* Reserve headroom — presumably to align the IP header
	 * (see IPFIELD_ALIGN_OFFSET); TODO confirm. */
	skb_reserve(skb, IPFIELD_ALIGN_OFFSET);
	data = skb_put(skb, len);
	/* Read the data from the WLAN module into our skb...
	 * (the transfer is rounded up to a 4-byte boundary) */
	err = spu_read(card, IF_SPI_DATA_RDWRPORT_REG, data, ALIGN(len, 4));
	if (err)
		goto free_skb;
	/* pass the SKB to libertas */
	err = lbs_process_rxed_packet(card->priv, skb);
	if (err)
		goto free_skb;
	/* success */
	goto out;
free_skb:
	dev_kfree_skb(skb);
out:
	if (err)
		netdev_err(priv->dev, "%s: err=%d\n", __func__, err);
	lbs_deb_leave(LBS_DEB_SPI);
	return err;
}
/*
 * Move data or a command from the host to the card.
 *
 * Writes the packet payload to the port register that matches @type and
 * always frees @packet, whether or not the transfer succeeded. Errors are
 * only logged; the function returns nothing.
 *
 * Cleanups vs. the previous version: the "int_type" local was set in both
 * switch arms but never read, and a redundant "if (err) goto out;" jumped
 * to the very next statement — both removed, behavior unchanged.
 */
static void if_spi_h2c(struct if_spi_card *card,
			struct if_spi_packet *packet, int type)
{
	struct lbs_private *priv = card->priv;
	int err = 0;
	u16 port_reg;
	switch (type) {
	case MVMS_DAT:
		port_reg = IF_SPI_DATA_RDWRPORT_REG;
		break;
	case MVMS_CMD:
		port_reg = IF_SPI_CMD_RDWRPORT_REG;
		break;
	default:
		netdev_err(priv->dev, "can't transfer buffer of type %d\n",
			type);
		err = -EINVAL;
		goto out;
	}
	/* Write the data to the card */
	err = spu_write(card, port_reg, packet->buffer, packet->blen);
out:
	kfree(packet);
	if (err)
		netdev_err(priv->dev, "%s: error %d\n", __func__, err);
}
/*
 * Inform the host about a card event.
 *
 * Reads the event cause from scratch register 3 and queues the low byte
 * as an event for libertas. The two register writes that re-arm the
 * event interrupt ignore their return values — NOTE(review): looks like
 * deliberate best-effort, but confirm.
 */
static void if_spi_e2h(struct if_spi_card *card)
{
	int err = 0;
	u32 cause;
	struct lbs_private *priv = card->priv;
	err = spu_read_u32(card, IF_SPI_SCRATCH_3_REG, &cause);
	if (err)
		goto out;
	/* re-enable the card event interrupt */
	spu_write_u16(card, IF_SPI_HOST_INT_STATUS_REG,
			~IF_SPI_HICU_CARD_EVENT);
	/* generate a card interrupt */
	spu_write_u16(card, IF_SPI_CARD_INT_CAUSE_REG, IF_SPI_CIC_HOST_EVENT);
	/* only the low byte carries the event code */
	lbs_queue_event(priv, cause & 0xff);
out:
	if (err)
		netdev_err(priv->dev, "%s: error %d\n", __func__, err);
}
/*
 * SPI worker: performs all card I/O on behalf of the interrupt handler
 * and if_spi_host_to_card(). Reads the host interrupt status register and
 * services, in order: command upload, data upload, command/data download
 * readiness, and card events.
 */
static void if_spi_host_to_card_worker(struct work_struct *work)
{
	int err;
	struct if_spi_card *card;
	u16 hiStatus;
	unsigned long flags;
	struct if_spi_packet *packet;
	struct lbs_private *priv;
	card = container_of(work, struct if_spi_card, packet_work);
	priv = card->priv;
	lbs_deb_enter(LBS_DEB_SPI);
	/*
	 * Read the host interrupt status register to see what we
	 * can do.
	 */
	err = spu_read_u16(card, IF_SPI_HOST_INT_STATUS_REG,
				&hiStatus);
	if (err) {
		netdev_err(priv->dev, "I/O error\n");
		goto err;
	}
	if (hiStatus & IF_SPI_HIST_CMD_UPLOAD_RDY) {
		err = if_spi_c2h_cmd(card);
		if (err)
			goto err;
	}
	if (hiStatus & IF_SPI_HIST_RX_UPLOAD_RDY) {
		err = if_spi_c2h_data(card);
		if (err)
			goto err;
	}
	/*
	 * workaround: in PS mode, the card does not set the Command
	 * Download Ready bit, but it sets TX Download Ready.
	 */
	if (hiStatus & IF_SPI_HIST_CMD_DOWNLOAD_RDY ||
	   (card->priv->psstate != PS_STATE_FULL_POWER &&
	    (hiStatus & IF_SPI_HIST_TX_DOWNLOAD_RDY))) {
		/*
		 * This means two things. First of all,
		 * if there was a previous command sent, the card has
		 * successfully received it.
		 * Secondly, it is now ready to download another
		 * command.
		 */
		lbs_host_to_card_done(card->priv);
		/* Do we have any command packets from the host to send?
		 * Pop at most one under buffer_lock, transfer outside it. */
		packet = NULL;
		spin_lock_irqsave(&card->buffer_lock, flags);
		if (!list_empty(&card->cmd_packet_list)) {
			packet = (struct if_spi_packet *)(card->
				cmd_packet_list.next);
			list_del(&packet->list);
		}
		spin_unlock_irqrestore(&card->buffer_lock, flags);
		if (packet)
			if_spi_h2c(card, packet, MVMS_CMD);
	}
	if (hiStatus & IF_SPI_HIST_TX_DOWNLOAD_RDY) {
		/* Do we have any data packets from the host to send? */
		packet = NULL;
		spin_lock_irqsave(&card->buffer_lock, flags);
		if (!list_empty(&card->data_packet_list)) {
			packet = (struct if_spi_packet *)(card->
				data_packet_list.next);
			list_del(&packet->list);
		}
		spin_unlock_irqrestore(&card->buffer_lock, flags);
		if (packet)
			if_spi_h2c(card, packet, MVMS_DAT);
	}
	if (hiStatus & IF_SPI_HIST_CARD_EVENT)
		if_spi_e2h(card);
err:
	if (err)
		netdev_err(priv->dev, "%s: got error %d\n", __func__, err);
	lbs_deb_leave(LBS_DEB_SPI);
}
/*
 * Host to Card
 *
 * Called from Libertas to transfer some data to the WLAN device.
 * We can't sleep here, so the payload is copied into a freshly allocated
 * packet, queued on the per-type list, and the SPI worker is kicked to do
 * the actual bus transfer. Returns 0 on success or a negative error code.
 *
 * Fix: on an unknown @type, the old code freed the packet and set -EINVAL
 * but still fell through to queue_work(); now the worker is only kicked
 * when a packet was actually queued.
 */
static int if_spi_host_to_card(struct lbs_private *priv,
				u8 type, u8 *buf, u16 nb)
{
	int err = 0;
	unsigned long flags;
	struct if_spi_card *card = priv->card;
	struct if_spi_packet *packet;
	u16 blen;
	lbs_deb_enter_args(LBS_DEB_SPI, "type %d, bytes %d", type, nb);
	if (nb == 0) {
		netdev_err(priv->dev, "%s: invalid size requested: %d\n",
			__func__, nb);
		err = -EINVAL;
		goto out;
	}
	/* Round the payload up to a 4-byte boundary; padding is zeroed. */
	blen = ALIGN(nb, 4);
	/* GFP_ATOMIC because this path must not sleep. */
	packet = kzalloc(sizeof(struct if_spi_packet) + blen, GFP_ATOMIC);
	if (!packet) {
		err = -ENOMEM;
		goto out;
	}
	packet->blen = blen;
	memcpy(packet->buffer, buf, nb);
	memset(packet->buffer + nb, 0, blen - nb);
	switch (type) {
	case MVMS_CMD:
		priv->dnld_sent = DNLD_CMD_SENT;
		spin_lock_irqsave(&card->buffer_lock, flags);
		list_add_tail(&packet->list, &card->cmd_packet_list);
		spin_unlock_irqrestore(&card->buffer_lock, flags);
		break;
	case MVMS_DAT:
		priv->dnld_sent = DNLD_DATA_SENT;
		spin_lock_irqsave(&card->buffer_lock, flags);
		list_add_tail(&packet->list, &card->data_packet_list);
		spin_unlock_irqrestore(&card->buffer_lock, flags);
		break;
	default:
		kfree(packet);
		netdev_err(priv->dev, "can't transfer buffer of type %d\n",
			type);
		err = -EINVAL;
		/* Nothing was queued, so don't kick the worker. */
		goto out;
	}
	/* Queue spi xfer work */
	queue_work(card->workqueue, &card->packet_work);
out:
	lbs_deb_leave_args(LBS_DEB_SPI, "err=%d", err);
	return err;
}
/*
 * Host Interrupts
 *
 * Service incoming interrupts from the WLAN device. We can't sleep here, so
 * don't try to talk on the SPI bus, just queue the SPI xfer work.
 */
static irqreturn_t if_spi_host_interrupt(int irq, void *dev_id)
{
	struct if_spi_card *card = dev_id;
	/* All actual bus traffic happens in if_spi_host_to_card_worker(). */
	queue_work(card->workqueue, &card->packet_work);
	return IRQ_HANDLED;
}
/*
* SPI callbacks
*/
/*
 * Initialize the WLAN device: bring up the SPU, identify the chip and,
 * unless firmware is already running (scratch register 4 holds the magic
 * value), download the helper and main firmware images.
 * Returns 0 on success or a negative error code.
 *
 * Fix: the firmware images obtained from lbs_get_firmware() were never
 * released, leaking both on every call; they are now released at "out".
 */
static int if_spi_init_card(struct if_spi_card *card)
{
	struct lbs_private *priv = card->priv;
	int err, i;
	u32 scratch;
	const struct firmware *helper = NULL;
	const struct firmware *mainfw = NULL;
	lbs_deb_enter(LBS_DEB_SPI);
	err = spu_init(card, card->pdata->use_dummy_writes);
	if (err)
		goto out;
	err = spu_get_chip_revision(card, &card->card_id, &card->card_rev);
	if (err)
		goto out;
	err = spu_read_u32(card, IF_SPI_SCRATCH_4_REG, &scratch);
	if (err)
		goto out;
	if (scratch == SUCCESSFUL_FW_DOWNLOAD_MAGIC)
		lbs_deb_spi("Firmware is already loaded for "
			"Marvell WLAN 802.11 adapter\n");
	else {
		/* Check if we support this card */
		for (i = 0; i < ARRAY_SIZE(fw_table); i++) {
			if (card->card_id == fw_table[i].model)
				break;
		}
		if (i == ARRAY_SIZE(fw_table)) {
			netdev_err(priv->dev, "Unsupported chip_id: 0x%02x\n",
				card->card_id);
			err = -ENODEV;
			goto out;
		}
		err = lbs_get_firmware(&card->spi->dev, card->card_id,
					&fw_table[0], &helper, &mainfw);
		if (err) {
			netdev_err(priv->dev, "failed to find firmware (%d)\n",
				err);
			goto out;
		}
		lbs_deb_spi("Initializing FW for Marvell WLAN 802.11 adapter "
			"(chip_id = 0x%04x, chip_rev = 0x%02x) "
			"attached to SPI bus_num %d, chip_select %d. "
			"spi->max_speed_hz=%d\n",
			card->card_id, card->card_rev,
			card->spi->master->bus_num,
			card->spi->chip_select,
			card->spi->max_speed_hz);
		err = if_spi_prog_helper_firmware(card, helper);
		if (err)
			goto out;
		err = if_spi_prog_main_firmware(card, mainfw);
		if (err)
			goto out;
		lbs_deb_spi("loaded FW for Marvell WLAN 802.11 adapter\n");
	}
	err = spu_set_interrupt_mode(card, 0, 1);
	if (err)
		goto out;
out:
	/* Drop the firmware images; previously these were leaked. */
	if (helper)
		release_firmware(helper);
	if (mainfw)
		release_firmware(mainfw);
	lbs_deb_leave_args(LBS_DEB_SPI, "err %d\n", err);
	return err;
}
/*
 * Deferred resume handler: redoes platform setup, re-initializes the card
 * (reloading firmware if needed), re-enables the IRQ and tells libertas
 * to resume. Runs only if the card was actually suspended.
 */
static void if_spi_resume_worker(struct work_struct *work)
{
	struct if_spi_card *card =
		container_of(work, struct if_spi_card, resume_work);
	if (!card->suspended)
		return;
	if (card->pdata->setup)
		card->pdata->setup(card->spi);
	/* Init card ... */
	if_spi_init_card(card);
	enable_irq(card->spi->irq);
	/* And resume it ... */
	lbs_resume(card->priv);
	card->suspended = 0;
}
/*
 * Probe an SPI-attached Libertas device: run the platform setup hook,
 * program the firmware, register with the libertas core, hook up the host
 * interrupt and start the card. Error paths unwind in reverse order.
 *
 * Fix: the alloc_workqueue() result was not checked; a NULL workqueue
 * would have been passed to request_irq/queue_work/destroy_workqueue.
 */
static int if_spi_probe(struct spi_device *spi)
{
	struct if_spi_card *card;
	struct lbs_private *priv = NULL;
	struct libertas_spi_platform_data *pdata = dev_get_platdata(&spi->dev);
	int err = 0;
	lbs_deb_enter(LBS_DEB_SPI);
	if (!pdata) {
		err = -EINVAL;
		goto out;
	}
	if (pdata->setup) {
		err = pdata->setup(spi);
		if (err)
			goto out;
	}
	/* Allocate card structure to represent this specific device */
	card = kzalloc(sizeof(struct if_spi_card), GFP_KERNEL);
	if (!card) {
		err = -ENOMEM;
		goto teardown;
	}
	spi_set_drvdata(spi, card);
	card->pdata = pdata;
	card->spi = spi;
	card->prev_xfer_time = jiffies;
	INIT_LIST_HEAD(&card->cmd_packet_list);
	INIT_LIST_HEAD(&card->data_packet_list);
	spin_lock_init(&card->buffer_lock);
	/* Initialize the SPI Interface Unit */
	/* Firmware load */
	err = if_spi_init_card(card);
	if (err)
		goto free_card;
	/*
	 * Register our card with libertas.
	 * This will call alloc_etherdev.
	 */
	priv = lbs_add_card(card, &spi->dev);
	if (!priv) {
		err = -ENOMEM;
		goto free_card;
	}
	card->priv = priv;
	priv->setup_fw_on_resume = 1;
	priv->card = card;
	priv->hw_host_to_card = if_spi_host_to_card;
	priv->enter_deep_sleep = NULL;
	priv->exit_deep_sleep = NULL;
	priv->reset_deep_sleep_wakeup = NULL;
	priv->fw_ready = 1;
	/* Initialize interrupt handling stuff. */
	card->workqueue = alloc_workqueue("libertas_spi", WQ_MEM_RECLAIM, 0);
	if (!card->workqueue) {
		err = -ENOMEM;
		goto remove_card;
	}
	INIT_WORK(&card->packet_work, if_spi_host_to_card_worker);
	INIT_WORK(&card->resume_work, if_spi_resume_worker);
	err = request_irq(spi->irq, if_spi_host_interrupt,
			IRQF_TRIGGER_FALLING, "libertas_spi", card);
	if (err) {
		pr_err("can't get host irq line-- request_irq failed\n");
		goto terminate_workqueue;
	}
	/*
	 * Start the card.
	 * This will call register_netdev, and we'll start
	 * getting interrupts...
	 */
	err = lbs_start_card(priv);
	if (err)
		goto release_irq;
	lbs_deb_spi("Finished initializing WLAN module.\n");
	/* successful exit */
	goto out;
release_irq:
	free_irq(spi->irq, card);
terminate_workqueue:
	destroy_workqueue(card->workqueue);
remove_card:
	lbs_remove_card(priv); /* will call free_netdev */
free_card:
	free_if_spi_card(card);
teardown:
	if (pdata->teardown)
		pdata->teardown(spi);
out:
	lbs_deb_leave_args(LBS_DEB_SPI, "err %d\n", err);
	return err;
}
/*
 * Tear down an SPI-attached device: stop and unregister the network
 * interface, release the IRQ and workqueue, run the platform teardown
 * hook, and free the card — essentially if_spi_probe() in reverse.
 */
static int libertas_spi_remove(struct spi_device *spi)
{
	struct if_spi_card *card = spi_get_drvdata(spi);
	struct lbs_private *priv = card->priv;
	lbs_deb_spi("libertas_spi_remove\n");
	lbs_deb_enter(LBS_DEB_SPI);
	/* Make sure a pending resume worker isn't still running. */
	cancel_work_sync(&card->resume_work);
	lbs_stop_card(priv);
	lbs_remove_card(priv); /* will call free_netdev */
	free_irq(spi->irq, card);
	destroy_workqueue(card->workqueue);
	if (card->pdata->teardown)
		card->pdata->teardown(spi);
	free_if_spi_card(card);
	lbs_deb_leave(LBS_DEB_SPI);
	return 0;
}
/*
 * PM suspend callback: quiesce libertas, drain the SPI worker, mask the
 * IRQ and run the platform teardown hook. Idempotent — a second call on
 * an already-suspended card is a no-op.
 */
static int if_spi_suspend(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);
	struct if_spi_card *card = spi_get_drvdata(spi);
	if (card->suspended)
		return 0;
	lbs_suspend(card->priv);
	flush_workqueue(card->workqueue);
	disable_irq(spi->irq);
	if (card->pdata->teardown)
		card->pdata->teardown(spi);
	card->suspended = 1;
	return 0;
}
/*
 * PM resume callback. The real work (platform setup, card/firmware
 * re-init) is deferred to if_spi_resume_worker().
 */
static int if_spi_resume(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);
	struct if_spi_card *card = spi_get_drvdata(spi);
	/* Schedule delayed work */
	schedule_work(&card->resume_work);
	return 0;
}
/* Power-management hooks (see if_spi_suspend/if_spi_resume above). */
static const struct dev_pm_ops if_spi_pm_ops = {
	.suspend	= if_spi_suspend,
	.resume		= if_spi_resume,
};
/* SPI driver registration block. */
static struct spi_driver libertas_spi_driver = {
	.probe	= if_spi_probe,
	.remove	= libertas_spi_remove,
	.driver = {
		.name	= "libertas_spi",
		.pm	= &if_spi_pm_ops,
	},
};
/*
 * Module functions
 */
/* Module entry point: register the SPI driver with the SPI core. */
static int __init if_spi_init_module(void)
{
	int ret = 0;
	lbs_deb_enter(LBS_DEB_SPI);
	printk(KERN_INFO "libertas_spi: Libertas SPI driver\n");
	ret = spi_register_driver(&libertas_spi_driver);
	lbs_deb_leave(LBS_DEB_SPI);
	return ret;
}
/* Module exit point: unregister the SPI driver. */
static void __exit if_spi_exit_module(void)
{
	lbs_deb_enter(LBS_DEB_SPI);
	spi_unregister_driver(&libertas_spi_driver);
	lbs_deb_leave(LBS_DEB_SPI);
}
/* Module metadata and entry/exit registration. */
module_init(if_spi_init_module);
module_exit(if_spi_exit_module);
MODULE_DESCRIPTION("Libertas SPI WLAN Driver");
MODULE_AUTHOR("Andrey Yurovsky <[email protected]>, "
		"Colin McCabe <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("spi:libertas_spi");
| {
"pile_set_name": "Github"
} |
/*!
@page compile Compiling GLFW
@tableofcontents
This is about compiling the GLFW library itself. For information on how to
build programs that use GLFW, see the @ref build guide.
@section compile_deps Dependencies
To compile GLFW and the accompanying example programs, you will need **CMake**,
which will generate the project files or makefiles for your particular
development environment. If you are on a Unix-like system such as Linux or
FreeBSD or have a package system like Fink, MacPorts, Cygwin or Homebrew, you
can simply install its CMake package. If not, you can get installers for
Windows and OS X from the [CMake website](http://www.cmake.org/).
Additional dependencies are listed below.
If you wish to compile GLFW without CMake, see @ref compile_manual.
@subsection compile_deps_msvc Dependencies using Visual C++ on Windows
The Microsoft Platform SDK that is installed along with Visual C++ contains all
the necessary headers, link libraries and tools except for CMake.
@subsection compile_deps_mingw Dependencies with MinGW or MinGW-w64 on Windows
Both the MinGW and the MinGW-w64 packages contain all the necessary headers,
link libraries and tools except for CMake.
@subsection compile_deps_mingw_cross Dependencies using MinGW or MinGW-w64 cross-compilation
Both Cygwin and many Linux distributions have MinGW or MinGW-w64 packages. For
example, Cygwin has the `mingw64-i686-gcc` and `mingw64-x86_64-gcc` packages
for 32- and 64-bit version of MinGW-w64, while Debian GNU/Linux and derivatives
like Ubuntu have the `mingw-w64` package for both.
GLFW has CMake toolchain files in the `CMake/` directory that allow for easy
cross-compilation of Windows binaries. To use these files you need to add a
special parameter when generating the project files or makefiles:
cmake -DCMAKE_TOOLCHAIN_FILE=<toolchain-file> .
The exact toolchain file to use depends on the prefix used by the MinGW or
MinGW-w64 binaries on your system. You can usually see this in the /usr
directory. For example, both the Debian/Ubuntu and Cygwin MinGW-w64 packages
have `/usr/x86_64-w64-mingw32` for the 64-bit compilers, so the correct
invocation would be:
cmake -DCMAKE_TOOLCHAIN_FILE=CMake/x86_64-w64-mingw32.cmake .
For more details see the article
[CMake Cross Compiling](http://www.paraview.org/Wiki/CMake_Cross_Compiling) on
the CMake wiki.
@subsection compile_deps_xcode Dependencies using Xcode on OS X
Xcode contains all necessary tools except for CMake. The necessary headers and
libraries are included in the core OS frameworks. Xcode can be downloaded from
the Mac App Store or from the ADC Member Center.
@subsection compile_deps_x11 Dependencies using Linux and X11
To compile GLFW for X11, you need to have the X11 and OpenGL header packages
installed, as well as the basic development tools like GCC and make. For
example, on Ubuntu and other distributions based on Debian GNU/Linux, you need
to install the `xorg-dev` and `libglu1-mesa-dev` packages. The former pulls in
all X.org header packages and the latter pulls in the Mesa OpenGL and GLU
packages. GLFW itself doesn't need or use GLU, but some of the examples do.
Note that using header files and libraries from Mesa during compilation *will
not* tie your binaries to the Mesa implementation of OpenGL.
@section compile_cmake Generating files with CMake
Once you have all necessary dependencies it is time to generate the project
files or makefiles for your development environment. CMake needs to know two
paths for this: the path to the *root* directory of the GLFW source tree (i.e.
*not* the `src` subdirectory) and the target path for the generated files and
compiled binaries. If these are the same, it is called an in-tree build,
otherwise it is called an out-of-tree build.
One of several advantages of out-of-tree builds is that you can generate files
and compile for different development environments using a single source tree.
@subsection compile_cmake_cli Generating files with the CMake command-line tool
To make an in-tree build, enter the *root* directory of the GLFW source tree
(i.e. *not* the `src` subdirectory) and run CMake. The current directory is
used as target path, while the path provided as an argument is used to find the
source tree.
cd <glfw-root-dir>
cmake .
To make an out-of-tree build, make another directory, enter it and run CMake
with the (relative or absolute) path to the root of the source tree as an
argument.
cd <glfw-root-dir>
mkdir build
cd build
cmake ..
@subsection compile_cmake_gui Generating files with the CMake GUI
If you are using the GUI version, choose the root of the GLFW source tree as
source location and the same directory or another, empty directory as the
destination for binaries. Choose *Configure*, change any options you wish to,
*Configure* again to let the changes take effect and then *Generate*.
@section compile_options CMake options
The CMake files for GLFW provide a number of options, although not all are
available on all supported platforms. Some of these are de facto standards
among CMake users and so have no `GLFW_` prefix.
If you are using the GUI version of CMake, these are listed and can be changed
from there. If you are using the command-line version, use the `ccmake` tool.
Some package systems like Ubuntu and other distributions based on Debian
GNU/Linux have this tool in a separate `cmake-curses-gui` package.
@subsection compile_options_shared Shared CMake options
`BUILD_SHARED_LIBS` determines whether GLFW is built as a static
library or as a DLL / shared library / dynamic library.
`LIB_SUFFIX` affects where the GLFW shared / dynamic library is installed. If it
is empty, it is installed to `${CMAKE_INSTALL_PREFIX}/lib`. If it is set to
`64`, it is installed to `${CMAKE_INSTALL_PREFIX}/lib64`.
`GLFW_CLIENT_LIBRARY` determines which client API library to use. If set to
`opengl` the OpenGL library is used, if set to `glesv1` the OpenGL ES 1.x
library is used, or if set to `glesv2` the OpenGL ES 2.0 library is used. The
selected library and its header files must be present on the system for this to
work.
`GLFW_BUILD_EXAMPLES` determines whether the GLFW examples are built
along with the library.
`GLFW_BUILD_TESTS` determines whether the GLFW test programs are
built along with the library.
`GLFW_BUILD_DOCS` determines whether the GLFW documentation is built along with
the library.
@subsection compile_options_osx OS X specific CMake options
`GLFW_USE_CHDIR` determines whether `glfwInit` changes the current
directory of bundled applications to the `Contents/Resources` directory.
`GLFW_USE_MENUBAR` determines whether the first call to
`glfwCreateWindow` sets up a minimal menu bar.
`GLFW_BUILD_UNIVERSAL` determines whether to build Universal Binaries.
@subsection compile_options_win32 Windows specific CMake options
`USE_MSVC_RUNTIME_LIBRARY_DLL` determines whether to use the DLL version or the
static library version of the Visual C++ runtime library. If set to `ON`, the
DLL version of the Visual C++ library is used. It is recommended to set this to
`ON`, as this keeps the executable smaller and benefits from security and bug
fix updates of the Visual C++ runtime.
`GLFW_USE_DWM_SWAP_INTERVAL` determines whether the swap interval is set even
when DWM compositing is enabled. If this is `ON`, the swap interval is set even
if DWM is enabled. It is recommended to set this to `OFF`, as doing otherwise
can lead to severe jitter.
`GLFW_USE_OPTIMUS_HPG` determines whether to export the `NvOptimusEnablement`
symbol, which forces the use of the high-performance GPU on nVidia Optimus
systems.
@subsection compile_options_egl EGL specific CMake options
`GLFW_USE_EGL` determines whether to use EGL instead of the platform-specific
context creation API. Note that EGL is not yet provided on all supported
platforms.
@section compile_manual Compiling GLFW manually
If you wish to compile GLFW without its CMake build environment then you will
have to do at least some of the platform detection yourself. GLFW needs
a number of configuration macros to be defined in order to know what it's being
compiled for and has many optional, platform-specific ones for various features.
When building with CMake, the `glfw_config.h` configuration header is generated
based on the current platform and CMake options. The GLFW CMake environment
defines `_GLFW_USE_CONFIG_H`, which causes this header to be included by
`internal.h`. Without this macro, GLFW will expect the necessary configuration
macros to be defined on the command-line.
Three macros *must* be defined when compiling GLFW: one selecting the window
creation API, one selecting the context creation API and one selecting the
client library.
Exactly one of each kind must be defined for GLFW to compile and link.
The window creation API is used to create windows, handle input, monitors, gamma
ramps and clipboard. The options are:
- `_GLFW_COCOA` to use the Cocoa frameworks
- `_GLFW_WIN32` to use the Win32 API
- `_GLFW_X11` to use the X Window System
The context creation API is used to enumerate pixel formats / framebuffer
configurations and to create contexts. The options are:
- `_GLFW_NSGL` to use the Cocoa OpenGL framework
- `_GLFW_WGL` to use the Win32 WGL API
- `_GLFW_GLX` to use the X11 GLX API
- `_GLFW_EGL` to use the EGL API (experimental)
The client library is the one providing the OpenGL or OpenGL ES API, which is
used by GLFW to probe the created context. This is not the same thing as the
client API, as many desktop OpenGL client libraries now expose the OpenGL ES API
through extensions. The options are:
- `_GLFW_USE_OPENGL` for the desktop OpenGL (opengl32.dll, libGL.so or
OpenGL.framework)
- `_GLFW_USE_GLESV1` for OpenGL ES 1.x (experimental)
- `_GLFW_USE_GLESV2` for OpenGL ES 2.x (experimental)
Note that `_GLFW_USE_GLESV1` and `_GLFW_USE_GLESV2` may only be used with EGL,
as the other context creation APIs do not interface with OpenGL ES client
libraries.
If you are building GLFW as a shared library / dynamic library / DLL then you
must also define `_GLFW_BUILD_DLL`. Otherwise, you may not define it.
If you are using the X11 window creation API then you *must* also select an entry
point retrieval mechanism.
- `_GLFW_HAS_GLXGETPROCADDRESS` to use `glXGetProcAddress` (recommended)
- `_GLFW_HAS_GLXGETPROCADDRESSARB` to use `glXGetProcAddressARB` (legacy)
- `_GLFW_HAS_GLXGETPROCADDRESSEXT` to use `glXGetProcAddressEXT` (legacy)
- `_GLFW_HAS_DLOPEN` to do manual retrieval with `dlopen` (fallback)
If you are using the Cocoa window creation API, the following options are
available:
- `_GLFW_USE_CHDIR` to `chdir` to the `Resources` subdirectory of the
application bundle during @ref glfwInit (recommended)
- `_GLFW_USE_MENUBAR` to create and populate the menu bar when the first window
is created (recommended)
*/
| {
"pile_set_name": "Github"
} |
#pragma once
#include <pu/Plutonium>
namespace ui
{
    // Layout for the application's "About" screen.
    class AboutLayout : public pu::ui::Layout
    {
        public:
            AboutLayout();
            PU_SMART_CTOR(AboutLayout);
            // Input handler; receives pressed/released/held button masks.
            void about_Input(u64 Down, u64 Up, u64 Held);
        private:
            pu::ui::elm::TextBlock::Ref leftText;   // left text column
            pu::ui::elm::TextBlock::Ref rightText;  // right text column
            pu::ui::elm::Rectangle::Ref bground;    // background rectangle
    };
}
| {
"pile_set_name": "Github"
} |
# V5: Requerimientos de Comunicación a través de la red
## Objetivo de Control
Los controles enumerados en esta categoría tienen por objetivo asegurar la confidencialidad e integridad de la información intercambiada entre la aplicación móvil y los servicios del servidor. Como mínimo se deben utilizar canales seguros y cifrados utilizando el protocolo TLS con las configuraciones apropiadas. En el nivel 2 se establecen medidas en profundidad como fijación de certificados SSL.
## Requerimientos de Verificación de Seguridad
| # | MSTG-ID | Descripción | L1 | L2 |
| -- | -------- | ---------------------- | - | - |
| **5.1** | MSTG-NETWORK-1 | La información es enviada cifrada utilizando TLS. El canal seguro es usado consistentemente en la aplicación. | ✓ | ✓ |
| **5.2** | MSTG-NETWORK-2 | Las configuraciones del protocolo TLS siguen las mejores prácticas de la industria, o lo hacen lo mejor posible en caso de que el sistema operativo del dispositivo no soporte los estándares recomendados. | ✓ | ✓ |
| **5.3** | MSTG-NETWORK-3 | La aplicación verifica el certificado X.509 del sistema remoto al establecer el canal seguro y sólo se aceptan certificados firmados por una CA de confianza. | ✓ | ✓ |
| **5.4** | MSTG-NETWORK-4 | La aplicación utiliza su propio almacén de certificados o realiza _pinning_ del certificado o la clave pública del servidor. Bajo ningún concepto establecerá conexiones con servidores que ofrecen otros certificados o claves, incluso si están firmados por una CA de confianza. | | ✓ |
| **5.5** | MSTG-NETWORK-5 | La aplicación no depende de un único canal de comunicaciones inseguro (email o SMS) para operaciones críticas como registro de usuarios o recuperación de cuentas. | | ✓ |
| **5.6** | MSTG-NETWORK-6 | La aplicación sólo depende de bibliotecas de conectividad y seguridad actualizadas. | | ✓ |
<!-- \pagebreak -->
## Referencias
La Guía de Pruebas de Seguridad Móvil de OWASP proporciona instrucciones detalladas para verificar los requisitos listados en esta sección.
- Android - <https://github.com/OWASP/owasp-mstg/blob/master/Document/0x05g-Testing-Network-Communication.md>
- iOS - <https://github.com/OWASP/owasp-mstg/blob/master/Document/0x06g-Testing-Network-Communication.md>
Para más información, ver también:
- OWASP Mobile Top 10: M3 (Insecure Communication) - <https://owasp.org/www-project-mobile-top-10/2016-risks/m3-insecure-communication>
- CWE 295 (Improper Certificate Validation) - <https://cwe.mitre.org/data/definitions/295.html>
- CWE 296 (Improper Following of a Certificate's Chain of Trust) - <https://cwe.mitre.org/data/definitions/296.html>
- CWE 297 (Improper Validation of Certificate with Host Mismatch) - <https://cwe.mitre.org/data/definitions/297.html>
- CWE 298 (Improper Validation of Certificate Expiration) - <https://cwe.mitre.org/data/definitions/298.html>
- CWE 308 (Use of Single-factor Authentication) - <https://cwe.mitre.org/data/definitions/308.html>
- CWE 319 (Cleartext Transmission of Sensitive Information) - <https://cwe.mitre.org/data/definitions/319.html>
- CWE 326 (Inadequate Encryption Strength) - <https://cwe.mitre.org/data/definitions/326.html>
- CWE 327 (Use of a Broken or Risky Cryptographic Algorithm) - <https://cwe.mitre.org/data/definitions/327.html>
- CWE 780 (Use of RSA Algorithm without OAEP) - <https://cwe.mitre.org/data/definitions/780.html>
- CWE 940 (Improper Verification of Source of a Communication Channel) - <https://cwe.mitre.org/data/definitions/940.html>
- CWE 941 (Incorrectly Specified Destination in a Communication Channel) - <https://cwe.mitre.org/data/definitions/941.html>
| {
"pile_set_name": "Github"
} |
// Copyright 2016 The LUCI Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package prpc (provisional RPC) implements an RPC client over HTTP 1.x.
//
// Like gRPC:
// - services are defined in .proto files
// - service implementation does not depend on pRPC.
// Unlike gRPC:
// - supports HTTP 1.x and AppEngine 1.x.
// - does not support streams.
//
// Server
//
// Type Server implements a pRPC server.
//
// Example of usage: https://github.com/luci/luci-go/tree/master/examples/appengine/helloworld_standard
//
// Package discovery implements service discovery.
//
// Compile service definitions
//
// Use cproto tool to compile .proto files to .go files with gRPC and pRPC support.
// It runs protoc and does some additional postprocessing.
//
// //go:generate cproto
//
// Install cproto:
// go install go.chromium.org/luci/grpc/cmd/cproto
//
// Protocol
//
// ## v1.2
//
// v1.2 is a small, backward-compatible amendment to v1.1 that adds support
// for error details.
//
// Response header "X-Prpc-Status-Details-Bin" contains elements of
// google.rpc.Status.details field, one value per element, in the same order.
// The header value is a standard base64 string of the encoded
// google.protobuf.Any, where the message encoding is the same as the response
// message encoding, i.e. depends on Accept request header.
//
// ## v1.1
//
// v1.1 is a small, backward-compatible amendment to v1.0 to address a
// security issue. Since it is backward compatible, it does not introduce
// a formal protocol version header at this time.
//
// Changes:
// - Requests/responses must use "application/json" media type instead of
// "application/prpc; encoding=json".
// - Responses must include "X-Content-Type-Options: nosniff" HTTP header.
//
// This enables CORB protection (which mitigates spectre) and disables content
// sniffing.
// For CORB, see https://chromium.googlesource.com/chromium/src/+/master/services/network/cross_origin_read_blocking_explainer.md.
//
// ## v1.0
//
// This section describes the pRPC protocol. It is based on HTTP 1.x and employs
// gRPC codes.
//
// A pRPC server MUST handle HTTP POST requests at `/prpc/{service}/{method}`
// where service is a full service name including full package name.
// The handler must decode an input message from an HTTP request,
// call the service method implementation and
// encode the returned output message or error to the HTTP response.
//
// pRPC protocol defines three protocol buffer encodings and media types.
// - Binary: "application/prpc; encoding=binary" (default).
// - JSON: "application/prpc; encoding=json" or "application/json"
// A response body MUST have `)]}'\n` prefix to prevent XSSI.
// - Text: "application/prpc; encoding=text"
// A pRPC server MUST support Binary and SHOULD support JSON and Text.
//
// Request headers:
// - "X-Prpc-Timeout": specifies request timeout.
// A client MAY specify it.
// If a service hits the timeout, a server MUST respond with HTTP 503 and
// DeadlineExceeded gRPC code.
// Value format: `\d+[HMSmun]` (regular expression), where
// - H - hour
// - M - minute
// - S - second
// - m - millisecond
// - u - microsecond
// - n - nanosecond
// - "Content-Type": specifies input message encoding in the body.
// A client SHOULD specify it.
// If not present, a server MUST treat the input message as Binary.
// - "Accept": specifies the output message encoding for the response.
// A client MAY specify it, a server MUST support it.
// - Any other headers MUST be added to metadata.MD in the context that is
// passed to the service method implementation.
// - If a header name has "-Bin" suffix, the server must treat it as
// standard-base64-encoded.
//
// Response headers:
// - "X-Prpc-Grpc-Code": specifies the gRPC code.
// A server MUST specify it in all cases.
// - If it is present, the client MUST ignore the HTTP status code.
// - If OK, the client MUST return the output message
// decoded from the body.
// - Otherwise, the client MUST return a gRPC error with the
// code and message read from the response body.
// - If not present, the client MUST return a non-gRPC error
// with message read from the response body.
// A client MAY read a portion of the response body.
// - "Content-Type": specifies the output message encoding.
// A server SHOULD specify it.
// If not specified, a client MUST treat it is as Binary.
// - Any metadata returned by a service method implementation MUST go into
// http headers, unless metadata key starts with "X-Prpc-".
//
// A server MUST always specify "X-Prpc-Grpc-Code".
// The server SHOULD specify HTTP status corresponding to the gRPC code.
//
// If the "X-Prpc-Grpc-Code" response header value is not 0, the response body
// MUST contain a description of the error.
//
// If a service/method is not found, the server MUST respond with Unimplemented
// gRPC code and SHOULD specify HTTP 501 status.
package prpc
| {
"pile_set_name": "Github"
} |
package com.packt.webstore.domain.repository.impl;
import java.util.HashMap;
import java.util.Map;
import org.springframework.stereotype.Repository;
import com.packt.webstore.domain.Cart;
import com.packt.webstore.domain.repository.CartRepository;
/**
 * In-memory {@link CartRepository} backed by a HashMap keyed on cart id.
 *
 * NOTE(review): HashMap is not thread-safe and this Spring bean is a
 * singleton shared across requests — consider ConcurrentHashMap if carts
 * can be mutated concurrently.
 */
@Repository
public class InMemoryCartRepository implements CartRepository{
private Map<String, Cart> listOfCarts;
public InMemoryCartRepository() {
listOfCarts = new HashMap<String,Cart>();
}
/**
 * Stores a new cart and returns it.
 * @throws IllegalArgumentException if a cart with the same id already exists
 */
public Cart create(Cart cart) {
// Bug fix: the original message used a bare "%" — an invalid conversion
// that makes String.format throw UnknownFormatConversionException instead
// of producing the intended message. Use "%s"; also fixed typos.
if(listOfCarts.containsKey(cart.getCartId())) {
throw new IllegalArgumentException(String.format("Can not create a cart. A cart with the given id (%s) already exists",cart.getCartId()));
}
listOfCarts.put(cart.getCartId(), cart);
return cart;
}
/** Returns the cart for the given id, or null if absent. */
public Cart read(String cartId) {
return listOfCarts.get(cartId);
}
/**
 * Replaces an existing cart.
 * @throws IllegalArgumentException if no cart with the given id exists
 */
public void update(String cartId, Cart cart) {
if(!listOfCarts.containsKey(cartId)) {
throw new IllegalArgumentException(String.format("Can not update cart. The cart with the given id (%s) does not exist",cartId));
}
listOfCarts.put(cartId, cart);
}
/**
 * Removes an existing cart.
 * @throws IllegalArgumentException if no cart with the given id exists
 */
public void delete(String cartId) {
if(!listOfCarts.containsKey(cartId)) {
throw new IllegalArgumentException(String.format("Can not delete cart. The cart with the given id (%s) does not exist",cartId));
}
listOfCarts.remove(cartId);
}
}
| {
"pile_set_name": "Github"
} |
/*
Open Tracker License
Terms and Conditions
Copyright (c) 1991-2000, Be Incorporated. All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice applies to all licensees
and shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF TITLE, MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
BE INCORPORATED BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Except as contained in this notice, the name of Be Incorporated shall not be
used in advertising or otherwise to promote the sale, use or other dealings in
this Software without prior written authorization from Be Incorporated.
Tracker(TM), Be(R), BeOS(R), and BeIA(TM) are trademarks or registered trademarks
of Be Incorporated in the United States and other countries. Other brand product
names are registered trademarks or trademarks of their respective holders.
All rights reserved.
*/
#ifndef _COUNT_VIEW_H
#define _COUNT_VIEW_H
#include <String.h>
#include <View.h>
namespace BPrivate {
class BPoseView;
class BCountView : public BView {
// displays the item count and a barber pole while the view is updating
public:
BCountView(BPoseView*);
~BCountView();
// BView hook overrides
virtual void Draw(BRect);
virtual void MouseDown(BPoint);
virtual void AttachedToWindow();
virtual void Pulse();
// count display / busy-indicator control (implemented in CountView.cpp)
void CheckCount();
void StartBarberPole();
void EndBarberPole();
// type-ahead string shown while the user types to select an item
void SetTypeAhead(const char*);
const char* TypeAhead() const;
bool IsTypingAhead() const;
// incremental editing of the filter string (type-ahead filtering mode)
void AddFilterCharacter(const char* character);
void RemoveFilterCharacter();
void CancelFilter();
const char* Filter() const;
bool IsFiltering() const;
private:
// layout helpers for the barber pole and the count text
BRect BarberPoleInnerRect() const;
BRect BarberPoleOuterRect() const;
BRect TextInvalRect() const;
BRect TextAndBarberPoleRect() const;
void TrySpinningBarberPole();
int32 fLastCount;
BPoseView* fPoseView; // NOTE(review): ownership not established by this header — presumably not owned
bool fShowingBarberPole : 1;
BBitmap* fBarberPoleMap;
float fLastBarberPoleOffset;
bigtime_t fStartSpinningAfter; // delay before the pole starts animating — see TrySpinningBarberPole()
BString fTypeAheadString;
BString fFilterString;
};
} // namespace BPrivate
using namespace BPrivate;
#endif	// _COUNT_VIEW_H
| {
"pile_set_name": "Github"
} |
'use strict';
const { Events } = require('../../../util/Constants');
module.exports = (client, packet) => {
const { old, updated } = client.actions.MessageUpdate.handle(packet.d);
if (old && updated) {
/**
* Emitted whenever a message is updated - e.g. embed or content change.
* @event Client#messageUpdate
* @param {Message} oldMessage The message before the update
* @param {Message} newMessage The message after the update
*/
client.emit(Events.MESSAGE_UPDATE, old, updated);
}
};
| {
"pile_set_name": "Github"
} |
<?php
/*
+--------------------------------------------------------------------+
| Copyright CiviCRM LLC. All rights reserved. |
| |
| This work is published under the GNU AGPLv3 license with some |
| permitted exceptions and without any warranty. For full license |
| and copyright information, see https://civicrm.org/licensing |
+--------------------------------------------------------------------+
*/
/**
* Class api_v3_NavigationTest
* @group headless
*/
class api_v3_NavigationTest extends CiviUnitTestCase {
// Default API version; per-test methods override it via the data provider.
protected $_apiversion = 3;
protected $_params;
// API entity under test.
protected $_entity = 'Navigation';
/**
 * Test get function.
 * @param int $version
 * @dataProvider versionThreeAndFour
 */
public function testGet($version) {
$this->_apiversion = $version;
// 'Manage Groups' is a standard navigation item in the default domain (1).
$this->callAPISuccess($this->_entity, 'getsingle', ['label' => 'Manage Groups', 'domain_id' => 1]);
}
/**
 * Test get specifying parent
 * FIXME: Api4
 */
public function testGetByParent() {
// get by name
$this->callAPISuccess($this->_entity, 'get', ['parentID' => 'Administer', 'domain_id' => 1]);
// then resolve the 'Administer' item's numeric id and query by that too
$params = [
'name' => 'Administer',
'domain_id' => 1,
'return' => 'id',
];
$adminId = $this->callAPISuccess($this->_entity, 'getvalue', $params);
$this->callAPISuccess($this->_entity, 'get', ['parentID' => $adminId, 'domain_id' => 1]);
}
/**
 * Test create function.
 * @param int $version
 * @dataProvider versionThreeAndFour
 */
public function testCreate($version) {
$this->_apiversion = $version;
$params = ['label' => 'Feed the Goats', 'domain_id' => 1];
$result = $this->callAPISuccess($this->_entity, 'create', $params);
// re-fetch and verify the stored values match what was submitted
$this->getAndCheck($params, $result['id'], $this->_entity, TRUE);
}
/**
 * Test create function.
 * @param int $version
 * @dataProvider versionThreeAndFour
 */
public function testDefaultDomain($version) {
$this->_apiversion = $version;
// no domain_id supplied on purpose
$params = ['label' => 'Herd the Cats'];
$result = $this->callAPISuccess($this->_entity, 'create', $params);
// Check domain_id has been set per default
$params['domain_id'] = CRM_Core_Config::domainID();
$this->getAndCheck($params, $result['id'], $this->_entity, TRUE);
}
/**
 * Test delete function.
 * @param int $version
 * @dataProvider versionThreeAndFour
 */
public function testDelete($version) {
$this->_apiversion = $version;
// grab any one existing navigation id, delete it, then confirm it is gone
$getParams = [
'return' => 'id',
'options' => ['limit' => 1],
];
$result = $this->callAPISuccess('Navigation', 'getvalue', $getParams);
$this->callAPISuccess('Navigation', 'delete', ['id' => $result]);
$this->callAPIFailure('Navigation', 'getvalue', ['id' => $result]);
}
}
| {
"pile_set_name": "Github"
} |
//
// This file was generated by the JavaTM Architecture for XML Binding(JAXB) Reference Implementation, v1.0.6-b27-fcs
// See <a href="http://java.sun.com/xml/jaxb">http://java.sun.com/xml/jaxb</a>
// Any modifications to this file will be lost upon recompilation of the source schema.
// Generated on: 2012.09.08 at 08:59:44 DU CEST
//
package com.sun.identity.diagnostic.base.core.jaxbgen.impl.runtime;
import java.util.StringTokenizer;
import javax.xml.bind.Element;
import javax.xml.bind.ParseConversionEvent;
import javax.xml.bind.ValidationEvent;
import javax.xml.bind.helpers.ParseConversionEventImpl;
import javax.xml.bind.helpers.ValidationEventImpl;
import javax.xml.bind.helpers.ValidationEventLocatorImpl;
import org.xml.sax.Attributes;
import org.xml.sax.SAXException;
import com.sun.xml.bind.JAXBAssertionError;
import com.sun.xml.bind.unmarshaller.Messages;
/**
* Convenient default implementation of
* {@link UnmarshallingEventHandler}
* to minimize code generation.
*
* <p>
* For historical reasons, sometimes this type is used where
* {@link UnmarshallingEventHandler} should be used.
*
* Once an exception is in the form of UnmarshalException, we consider
* it to be already reported to the client app.
*/
public abstract class AbstractUnmarshallingEventHandlerImpl implements UnmarshallingEventHandler
{
public AbstractUnmarshallingEventHandlerImpl(UnmarshallingContext _ctxt,
String _stateTextTypes ) {
this.context = _ctxt;
this.stateTextTypes = _stateTextTypes;
}
// Shared unmarshalling context; also used to push/pop child handlers.
public final UnmarshallingContext context;
/**
* Text type of states encoded into a string.
* 'L' means a list state.
*/
private final String stateTextTypes;
//
//
// methods that will be provided by the generated code.
//
//
// internal events
// Default implementations report the event as unexpected; generated
// subclasses override these for the states where the event is legal.
public void enterElement(String uri, String local, String qname, Attributes atts) throws SAXException {
unexpectedEnterElement(uri,local,qname,atts);
}
public void leaveElement(String uri, String local, String qname) throws SAXException {
unexpectedLeaveElement(uri,local,qname);
}
// In a list state the text is split into whitespace-separated tokens and
// each token is delivered separately (possibly to a different handler).
public final void text(String text) throws SAXException {
if(isListState()) {
// in list state, we don't need to care about whitespaces.
// if the text is all whitespace, this won't generate a text event,
// so it would be just fine.
StringTokenizer tokens = new StringTokenizer(text);
if( tokens.countTokens()==1 ) {
handleText(text);
} else {
while(tokens.hasMoreTokens())
// the handler can be switched during the text processing,
// so the current handler has to be obtained inside the loop
context.getCurrentHandler().text(tokens.nextToken());
}
} else {
// otherwise process this token
handleText(text);
}
}
protected void handleText(String s) throws SAXException {
unexpectedText(s);
}
public void enterAttribute(String uri, String local, String qname) throws SAXException {
unexpectedEnterAttribute(uri,local,qname);
}
public void leaveAttribute(String uri, String local, String qname) throws SAXException {
unexpectedLeaveAttribute(uri,local,qname);
}
// Called when a child handler finishes; restores this automaton's state.
public void leaveChild(int nextState) throws SAXException {
this.state = nextState;
}
/**
* Checks if the current state is marked as a list state.
*/
protected final boolean isListState() {
return stateTextTypes.charAt(state)=='L';
}
/** Current state of this automaton. */
public int state;
//
//
// utility methods
//
//
/** Called when a RuntimeException is thrown during unmarshalling a text. */
protected void handleUnexpectedTextException( String text, RuntimeException e ) throws SAXException {
// report this as an error
reportError( Messages.format(Messages.UNEXPECTED_TEXT,text), e, true );
}
/**
* Last resort when something goes terribly wrong within the unmarshaller.
*/
protected void handleGenericException( Exception e ) throws SAXException {
reportError( e.getMessage(), e, false );
}
// Debug aid: prints the current automaton state to stderr.
protected final void dump() {
System.err.println("state is :"+state);
}
private void reportError( String msg, boolean canRecover ) throws SAXException {
reportError( msg, null, canRecover );
}
// Converts the error into a ValidationEvent and lets the context decide
// whether unmarshalling continues (canRecover) or aborts.
private void reportError( String msg, Exception nested, boolean canRecover ) throws SAXException {
context.handleEvent( new ValidationEventImpl(
canRecover? ValidationEvent.ERROR : ValidationEvent.FATAL_ERROR,
msg,
new ValidationEventLocatorImpl(context.getLocator()),
nested ), canRecover );
}
protected final void unexpectedEnterElement( String uri, String local, String qname, Attributes atts ) throws SAXException {
// notify the error
reportError( Messages.format(Messages.UNEXPECTED_ENTER_ELEMENT, uri, local ), true );
// then recover by ignoring the whole element.
context.pushContentHandler(new Discarder(context),state);
context.getCurrentHandler().enterElement(uri,local,qname,atts);
}
protected final void unexpectedLeaveElement( String uri, String local, String qname ) throws SAXException {
reportError( Messages.format(Messages.UNEXPECTED_LEAVE_ELEMENT, uri, local ), false );
}
protected final void unexpectedEnterAttribute( String uri, String local, String qname ) throws SAXException {
reportError( Messages.format(Messages.UNEXPECTED_ENTER_ATTRIBUTE, uri, local ), false );
}
protected final void unexpectedLeaveAttribute( String uri, String local, String qname ) throws SAXException {
reportError( Messages.format(Messages.UNEXPECTED_LEAVE_ATTRIBUTE, uri, local ), false );
}
protected final void unexpectedText( String str ) throws SAXException {
// make str printable
str = str.replace('\r',' ').replace('\n',' ').replace('\t',' ').trim();
reportError( Messages.format(Messages.UNEXPECTED_TEXT, str ), true );
}
protected final void unexpectedLeaveChild() throws SAXException {
// I believe this is really a bug of the compiler,
// since when an object spawns a child object, it must be "prepared"
// to receive this event.
dump();
throw new JAXBAssertionError(
Messages.format( Messages.UNEXPECTED_LEAVE_CHILD ) );
}
/**
* This method is called by the generated derived class
* when a datatype parse method throws an exception.
*/
protected void handleParseConversionException(Exception e) throws SAXException {
if( e instanceof RuntimeException )
throw (RuntimeException)e; // don't catch the runtime exception. just let it go.
// wrap it into a ParseConversionEvent and report it
ParseConversionEvent pce = new ParseConversionEventImpl(
ValidationEvent.ERROR, e.getMessage(),
new ValidationEventLocatorImpl(context.getLocator()), e );
context.handleEvent(pce,true);
}
//
//
// spawn a new child object
//
//
// Reflectively instantiates the child class and pushes its unmarshaller;
// note that instantiation failures are rethrown as Errors, not Exceptions.
private UnmarshallingEventHandler spawnChild( Class clazz, int memento ) {
UnmarshallableObject child;
try {
child = (UnmarshallableObject)clazz.newInstance();
} catch (InstantiationException e) {
throw new InstantiationError(e.getMessage());
} catch (IllegalAccessException e) {
throw new IllegalAccessError(e.getMessage());
}
UnmarshallingEventHandler handler = child.createUnmarshaller(context);
context.pushContentHandler(handler,memento);
return handler;
}
// The spawnChildFrom* variants forward the triggering event to the newly
// spawned child handler and return the child object being built.
protected final Object spawnChildFromEnterElement(Class clazz, int memento, String uri, String local, String qname, Attributes atts)
throws SAXException {
UnmarshallingEventHandler ueh = spawnChild(clazz,memento);
ueh.enterElement(uri,local,qname,atts);
return ueh.owner();
}
protected final Object spawnChildFromEnterAttribute(Class clazz, int memento, String uri, String local, String qname)
throws SAXException {
UnmarshallingEventHandler ueh = spawnChild(clazz,memento);
ueh.enterAttribute(uri,local,qname);
return ueh.owner();
}
protected final Object spawnChildFromText(Class clazz, int memento, String value)
throws SAXException {
UnmarshallingEventHandler ueh = spawnChild(clazz,memento);
ueh.text(value);
return ueh.owner();
}
// these methods can be used if a child object can be nullable
protected final Object spawnChildFromLeaveElement(Class clazz, int memento, String uri, String local, String qname)
throws SAXException {
UnmarshallingEventHandler ueh = spawnChild(clazz,memento);
ueh.leaveElement(uri,local,qname);
return ueh.owner();
}
protected final Object spawnChildFromLeaveAttribute(Class clazz, int memento, String uri, String local, String qname)
throws SAXException {
UnmarshallingEventHandler ueh = spawnChild(clazz,memento);
ueh.leaveAttribute(uri,local,qname);
return ueh.owner();
}
// Looks up an unmarshaller for an unknown (wildcard) element; unknown
// subtrees are silently discarded via Discarder.
protected final Element spawnWildcard( int memento, String uri, String local, String qname, Attributes atts )
throws SAXException {
UnmarshallingEventHandler ueh = context.getGrammarInfo().createUnmarshaller(uri,local,context);
if(ueh!=null) {
context.pushContentHandler(ueh,memento);
ueh.enterElement(uri,local,qname,atts);
return (Element)ueh.owner();
} else {
// if no class is available to unmarshal this element, discard
// the sub-tree by feeding events to discarder.
context.pushContentHandler( new Discarder(context), memento );
context.getCurrentHandler().enterElement(uri,local,qname,atts);
return null; // return null so that the discarder will be ignored
}
}
//
//
// spawn a new child handler.
// used for super class and RELAXNG interleave handling.
//
protected final void spawnHandlerFromEnterElement(
UnmarshallingEventHandler unm, int memento, String uri, String local, String qname, Attributes atts )
throws SAXException {
context.pushContentHandler(unm,memento);
unm.enterElement(uri,local,qname,atts);
}
protected final void spawnHandlerFromEnterAttribute(
UnmarshallingEventHandler unm, int memento, String uri, String local, String qname)
throws SAXException {
context.pushContentHandler(unm,memento);
unm.enterAttribute(uri,local,qname);
}
protected final void spawnHandlerFromFromText(
UnmarshallingEventHandler unm, int memento, String value)
throws SAXException {
context.pushContentHandler(unm,memento);
unm.text(value);
}
protected final void spawnHandlerFromLeaveElement(
UnmarshallingEventHandler unm, int memento, String uri, String local, String qname)
throws SAXException {
context.pushContentHandler(unm,memento);
unm.leaveElement(uri,local,qname);
}
protected final void spawnHandlerFromLeaveAttribute(
UnmarshallingEventHandler unm, int memento, String uri, String local, String qname)
throws SAXException {
context.pushContentHandler(unm,memento);
unm.leaveAttribute(uri,local,qname);
}
protected final void spawnHandlerFromText(
UnmarshallingEventHandler unm, int memento, String text )
throws SAXException {
context.pushContentHandler(unm,memento);
unm.text(text);
}
//
//
// revert to parent
//
//
// The revertToParentFrom* methods pop this handler and replay the
// triggering event on the parent handler.
protected final void revertToParentFromEnterElement(String uri,String local, String qname,Attributes atts)
throws SAXException {
context.popContentHandler();
context.getCurrentHandler().enterElement(uri,local,qname,atts);
}
protected final void revertToParentFromLeaveElement(String uri,String local, String qname)
throws SAXException {
context.popContentHandler();
context.getCurrentHandler().leaveElement(uri,local,qname);
}
protected final void revertToParentFromEnterAttribute(String uri,String local, String qname)
throws SAXException {
context.popContentHandler();
context.getCurrentHandler().enterAttribute(uri,local,qname);
}
protected final void revertToParentFromLeaveAttribute(String uri,String local, String qname)
throws SAXException {
context.popContentHandler();
context.getCurrentHandler().leaveAttribute(uri,local,qname);
}
protected final void revertToParentFromText(String value)
throws SAXException {
context.popContentHandler();
context.getCurrentHandler().text(value);
}
}
| {
"pile_set_name": "Github"
} |
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<title>Insert title here</title>
</head>
<body>
<p>Age: <span th:text="${age}?: '年龄为null'"></span></p>
<p>Age2: <span th:text="${age2}?: '年龄为null'"></span></p>
</body>
</html> | {
"pile_set_name": "Github"
} |
<?php
/*
* This file is part of PHPExifTool.
*
* (c) 2012 Romain Neutron <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
namespace PHPExiftool\Driver\Tag\ASF;
use JMS\Serializer\Annotation\ExclusionPolicy;
use PHPExiftool\Driver\AbstractTag;
/**
 * Generated descriptor for the ASF "Text" tag (group g2 = 'Video').
 *
 * @ExclusionPolicy("all")
 */
class Text extends AbstractTag
{
protected $Id = 'Text';
protected $Name = 'Text';
// NOTE(review): FullName is 'ASF::ExtendedDescr' while GroupName is plain
// 'ASF' — this class is generated from exiftool data; verify against the
// exiftool tag tables before changing.
protected $FullName = 'ASF::ExtendedDescr';
protected $GroupName = 'ASF';
protected $g0 = 'ASF';
protected $g1 = 'ASF';
protected $g2 = 'Video';
// '?' = unknown value type per the generated exiftool metadata
protected $Type = '?';
// tag is read-only (not writable through PHPExiftool)
protected $Writable = false;
protected $Description = 'Text';
}
"pile_set_name": "Github"
} |
#name : Startup
#contributor : Rick Moynihan <[email protected]>
# --
#+STARTUP: ${1:options}
| {
"pile_set_name": "Github"
} |
(function( factory ) {
// UMD wrapper: register as an AMD module when a loader is present,
// otherwise extend the global jQuery directly.
if ( typeof define === "function" && define.amd ) {
define( ["jquery", "../jquery.validate"], factory );
} else {
factory( jQuery );
}
}(function( $ ) {
/*
* Translated default messages for the jQuery validation plugin.
* Locale: HR (Croatia; hrvatski jezik)
*/
// Overwrites the plugin's default (English) messages in place.
$.extend($.validator.messages, {
required: "Ovo polje je obavezno.",
remote: "Ovo polje treba popraviti.",
email: "Unesite ispravnu e-mail adresu.",
url: "Unesite ispravan URL.",
date: "Unesite ispravan datum.",
dateISO: "Unesite ispravan datum (ISO).",
number: "Unesite ispravan broj.",
digits: "Unesite samo brojeve.",
creditcard: "Unesite ispravan broj kreditne kartice.",
equalTo: "Unesite ponovo istu vrijednost.",
extension: "Unesite vrijednost sa ispravnom ekstenzijom.",
// {0}/{1} placeholders are filled in by $.validator.format at display time
maxlength: $.validator.format("Maksimalni broj znakova je {0} ."),
minlength: $.validator.format("Minimalni broj znakova je {0} ."),
rangelength: $.validator.format("Unesite vrijednost između {0} i {1} znakova."),
range: $.validator.format("Unesite vrijednost između {0} i {1}."),
max: $.validator.format("Unesite vrijednost manju ili jednaku {0}."),
min: $.validator.format("Unesite vrijednost veću ili jednaku {0}.")
});
})); | {
"pile_set_name": "Github"
} |
type=item
items=minecraft:experience_bottle
texture=./blastprot.png
nbt.display.Lore.*=ipattern:*Blast Protection* | {
"pile_set_name": "Github"
} |
//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// <string>
// basic_string<charT,traits,Allocator>&
// replace(const_iterator i1, const_iterator i2, const charT* s);
#include <stdio.h>
#include <string>
#include <algorithm>
#include <cassert>
#include "test_macros.h"
#include "min_allocator.h"
// Replaces the range [pos1, pos1+n1) of s with the NUL-terminated string
// str via the (const_iterator, const_iterator, const charT*) overload of
// basic_string::replace, then checks both the resulting content and the
// size arithmetic (old_size - erased + inserted).
template <class S>
void
test(S s, typename S::size_type pos1, typename S::size_type n1, const typename S::value_type* str, S expected)
{
typename S::size_type old_size = s.size();
typename S::const_iterator first = s.begin() + pos1;
typename S::const_iterator last = s.begin() + pos1 + n1;
typename S::size_type xlen = last - first;
s.replace(first, last, str);
LIBCPP_ASSERT(s.__invariants());
assert(s == expected);
typename S::size_type rlen = S::traits_type::length(str);
assert(s.size() == old_size - xlen + rlen);
}
// Exhaustive table of replace() cases on "", "abcde" and "abcdefghij":
// every (position, erase-length) combination crossed with replacement
// strings of length 0, 5, 10 and 20.
template <class S>
void test0()
{
test(S(""), 0, 0, "", S(""));
test(S(""), 0, 0, "12345", S("12345"));
test(S(""), 0, 0, "1234567890", S("1234567890"));
test(S(""), 0, 0, "12345678901234567890", S("12345678901234567890"));
test(S("abcde"), 0, 0, "", S("abcde"));
test(S("abcde"), 0, 0, "12345", S("12345abcde"));
test(S("abcde"), 0, 0, "1234567890", S("1234567890abcde"));
test(S("abcde"), 0, 0, "12345678901234567890", S("12345678901234567890abcde"));
test(S("abcde"), 0, 1, "", S("bcde"));
test(S("abcde"), 0, 1, "12345", S("12345bcde"));
test(S("abcde"), 0, 1, "1234567890", S("1234567890bcde"));
test(S("abcde"), 0, 1, "12345678901234567890", S("12345678901234567890bcde"));
test(S("abcde"), 0, 2, "", S("cde"));
test(S("abcde"), 0, 2, "12345", S("12345cde"));
test(S("abcde"), 0, 2, "1234567890", S("1234567890cde"));
test(S("abcde"), 0, 2, "12345678901234567890", S("12345678901234567890cde"));
test(S("abcde"), 0, 4, "", S("e"));
test(S("abcde"), 0, 4, "12345", S("12345e"));
test(S("abcde"), 0, 4, "1234567890", S("1234567890e"));
test(S("abcde"), 0, 4, "12345678901234567890", S("12345678901234567890e"));
test(S("abcde"), 0, 5, "", S(""));
test(S("abcde"), 0, 5, "12345", S("12345"));
test(S("abcde"), 0, 5, "1234567890", S("1234567890"));
test(S("abcde"), 0, 5, "12345678901234567890", S("12345678901234567890"));
test(S("abcde"), 1, 0, "", S("abcde"));
test(S("abcde"), 1, 0, "12345", S("a12345bcde"));
test(S("abcde"), 1, 0, "1234567890", S("a1234567890bcde"));
test(S("abcde"), 1, 0, "12345678901234567890", S("a12345678901234567890bcde"));
test(S("abcde"), 1, 1, "", S("acde"));
test(S("abcde"), 1, 1, "12345", S("a12345cde"));
test(S("abcde"), 1, 1, "1234567890", S("a1234567890cde"));
test(S("abcde"), 1, 1, "12345678901234567890", S("a12345678901234567890cde"));
test(S("abcde"), 1, 2, "", S("ade"));
test(S("abcde"), 1, 2, "12345", S("a12345de"));
test(S("abcde"), 1, 2, "1234567890", S("a1234567890de"));
test(S("abcde"), 1, 2, "12345678901234567890", S("a12345678901234567890de"));
test(S("abcde"), 1, 3, "", S("ae"));
test(S("abcde"), 1, 3, "12345", S("a12345e"));
test(S("abcde"), 1, 3, "1234567890", S("a1234567890e"));
test(S("abcde"), 1, 3, "12345678901234567890", S("a12345678901234567890e"));
test(S("abcde"), 1, 4, "", S("a"));
test(S("abcde"), 1, 4, "12345", S("a12345"));
test(S("abcde"), 1, 4, "1234567890", S("a1234567890"));
test(S("abcde"), 1, 4, "12345678901234567890", S("a12345678901234567890"));
test(S("abcde"), 2, 0, "", S("abcde"));
test(S("abcde"), 2, 0, "12345", S("ab12345cde"));
test(S("abcde"), 2, 0, "1234567890", S("ab1234567890cde"));
test(S("abcde"), 2, 0, "12345678901234567890", S("ab12345678901234567890cde"));
test(S("abcde"), 2, 1, "", S("abde"));
test(S("abcde"), 2, 1, "12345", S("ab12345de"));
test(S("abcde"), 2, 1, "1234567890", S("ab1234567890de"));
test(S("abcde"), 2, 1, "12345678901234567890", S("ab12345678901234567890de"));
test(S("abcde"), 2, 2, "", S("abe"));
test(S("abcde"), 2, 2, "12345", S("ab12345e"));
test(S("abcde"), 2, 2, "1234567890", S("ab1234567890e"));
test(S("abcde"), 2, 2, "12345678901234567890", S("ab12345678901234567890e"));
test(S("abcde"), 2, 3, "", S("ab"));
test(S("abcde"), 2, 3, "12345", S("ab12345"));
test(S("abcde"), 2, 3, "1234567890", S("ab1234567890"));
test(S("abcde"), 2, 3, "12345678901234567890", S("ab12345678901234567890"));
test(S("abcde"), 4, 0, "", S("abcde"));
test(S("abcde"), 4, 0, "12345", S("abcd12345e"));
test(S("abcde"), 4, 0, "1234567890", S("abcd1234567890e"));
test(S("abcde"), 4, 0, "12345678901234567890", S("abcd12345678901234567890e"));
test(S("abcde"), 4, 1, "", S("abcd"));
test(S("abcde"), 4, 1, "12345", S("abcd12345"));
test(S("abcde"), 4, 1, "1234567890", S("abcd1234567890"));
test(S("abcde"), 4, 1, "12345678901234567890", S("abcd12345678901234567890"));
test(S("abcde"), 5, 0, "", S("abcde"));
test(S("abcde"), 5, 0, "12345", S("abcde12345"));
test(S("abcde"), 5, 0, "1234567890", S("abcde1234567890"));
test(S("abcde"), 5, 0, "12345678901234567890", S("abcde12345678901234567890"));
test(S("abcdefghij"), 0, 0, "", S("abcdefghij"));
test(S("abcdefghij"), 0, 0, "12345", S("12345abcdefghij"));
test(S("abcdefghij"), 0, 0, "1234567890", S("1234567890abcdefghij"));
test(S("abcdefghij"), 0, 0, "12345678901234567890", S("12345678901234567890abcdefghij"));
test(S("abcdefghij"), 0, 1, "", S("bcdefghij"));
test(S("abcdefghij"), 0, 1, "12345", S("12345bcdefghij"));
test(S("abcdefghij"), 0, 1, "1234567890", S("1234567890bcdefghij"));
test(S("abcdefghij"), 0, 1, "12345678901234567890", S("12345678901234567890bcdefghij"));
test(S("abcdefghij"), 0, 5, "", S("fghij"));
test(S("abcdefghij"), 0, 5, "12345", S("12345fghij"));
test(S("abcdefghij"), 0, 5, "1234567890", S("1234567890fghij"));
test(S("abcdefghij"), 0, 5, "12345678901234567890", S("12345678901234567890fghij"));
test(S("abcdefghij"), 0, 9, "", S("j"));
test(S("abcdefghij"), 0, 9, "12345", S("12345j"));
test(S("abcdefghij"), 0, 9, "1234567890", S("1234567890j"));
test(S("abcdefghij"), 0, 9, "12345678901234567890", S("12345678901234567890j"));
test(S("abcdefghij"), 0, 10, "", S(""));
test(S("abcdefghij"), 0, 10, "12345", S("12345"));
test(S("abcdefghij"), 0, 10, "1234567890", S("1234567890"));
test(S("abcdefghij"), 0, 10, "12345678901234567890", S("12345678901234567890"));
test(S("abcdefghij"), 1, 0, "", S("abcdefghij"));
test(S("abcdefghij"), 1, 0, "12345", S("a12345bcdefghij"));
test(S("abcdefghij"), 1, 0, "1234567890", S("a1234567890bcdefghij"));
test(S("abcdefghij"), 1, 0, "12345678901234567890", S("a12345678901234567890bcdefghij"));
test(S("abcdefghij"), 1, 1, "", S("acdefghij"));
test(S("abcdefghij"), 1, 1, "12345", S("a12345cdefghij"));
test(S("abcdefghij"), 1, 1, "1234567890", S("a1234567890cdefghij"));
test(S("abcdefghij"), 1, 1, "12345678901234567890", S("a12345678901234567890cdefghij"));
}
template <class S>
void test1()
{
    // Each call checks: S(str).replace(pos, n1, cstr) == expected,
    // i.e. n1 characters starting at pos are replaced by the whole
    // NUL-terminated string cstr.
    // Cases below use a 10-character source string "abcdefghij".
    test(S("abcdefghij"), 1, 4, "", S("afghij"));
    test(S("abcdefghij"), 1, 4, "12345", S("a12345fghij"));
    test(S("abcdefghij"), 1, 4, "1234567890", S("a1234567890fghij"));
    test(S("abcdefghij"), 1, 4, "12345678901234567890", S("a12345678901234567890fghij"));
    test(S("abcdefghij"), 1, 8, "", S("aj"));
    test(S("abcdefghij"), 1, 8, "12345", S("a12345j"));
    test(S("abcdefghij"), 1, 8, "1234567890", S("a1234567890j"));
    test(S("abcdefghij"), 1, 8, "12345678901234567890", S("a12345678901234567890j"));
    test(S("abcdefghij"), 1, 9, "", S("a"));
    test(S("abcdefghij"), 1, 9, "12345", S("a12345"));
    test(S("abcdefghij"), 1, 9, "1234567890", S("a1234567890"));
    test(S("abcdefghij"), 1, 9, "12345678901234567890", S("a12345678901234567890"));
    test(S("abcdefghij"), 5, 0, "", S("abcdefghij"));
    test(S("abcdefghij"), 5, 0, "12345", S("abcde12345fghij"));
    test(S("abcdefghij"), 5, 0, "1234567890", S("abcde1234567890fghij"));
    test(S("abcdefghij"), 5, 0, "12345678901234567890", S("abcde12345678901234567890fghij"));
    test(S("abcdefghij"), 5, 1, "", S("abcdeghij"));
    test(S("abcdefghij"), 5, 1, "12345", S("abcde12345ghij"));
    test(S("abcdefghij"), 5, 1, "1234567890", S("abcde1234567890ghij"));
    test(S("abcdefghij"), 5, 1, "12345678901234567890", S("abcde12345678901234567890ghij"));
    test(S("abcdefghij"), 5, 2, "", S("abcdehij"));
    test(S("abcdefghij"), 5, 2, "12345", S("abcde12345hij"));
    test(S("abcdefghij"), 5, 2, "1234567890", S("abcde1234567890hij"));
    test(S("abcdefghij"), 5, 2, "12345678901234567890", S("abcde12345678901234567890hij"));
    test(S("abcdefghij"), 5, 4, "", S("abcdej"));
    test(S("abcdefghij"), 5, 4, "12345", S("abcde12345j"));
    test(S("abcdefghij"), 5, 4, "1234567890", S("abcde1234567890j"));
    test(S("abcdefghij"), 5, 4, "12345678901234567890", S("abcde12345678901234567890j"));
    test(S("abcdefghij"), 5, 5, "", S("abcde"));
    test(S("abcdefghij"), 5, 5, "12345", S("abcde12345"));
    test(S("abcdefghij"), 5, 5, "1234567890", S("abcde1234567890"));
    test(S("abcdefghij"), 5, 5, "12345678901234567890", S("abcde12345678901234567890"));
    test(S("abcdefghij"), 9, 0, "", S("abcdefghij"));
    test(S("abcdefghij"), 9, 0, "12345", S("abcdefghi12345j"));
    test(S("abcdefghij"), 9, 0, "1234567890", S("abcdefghi1234567890j"));
    test(S("abcdefghij"), 9, 0, "12345678901234567890", S("abcdefghi12345678901234567890j"));
    test(S("abcdefghij"), 9, 1, "", S("abcdefghi"));
    test(S("abcdefghij"), 9, 1, "12345", S("abcdefghi12345"));
    test(S("abcdefghij"), 9, 1, "1234567890", S("abcdefghi1234567890"));
    test(S("abcdefghij"), 9, 1, "12345678901234567890", S("abcdefghi12345678901234567890"));
    // pos == size(): replace degenerates to append.
    test(S("abcdefghij"), 10, 0, "", S("abcdefghij"));
    test(S("abcdefghij"), 10, 0, "12345", S("abcdefghij12345"));
    test(S("abcdefghij"), 10, 0, "1234567890", S("abcdefghij1234567890"));
    test(S("abcdefghij"), 10, 0, "12345678901234567890", S("abcdefghij12345678901234567890"));
    // Cases below use a 20-character source string "abcdefghijklmnopqrst"
    // (long enough to exceed the SSO buffer on common implementations).
    test(S("abcdefghijklmnopqrst"), 0, 0, "", S("abcdefghijklmnopqrst"));
    test(S("abcdefghijklmnopqrst"), 0, 0, "12345", S("12345abcdefghijklmnopqrst"));
    test(S("abcdefghijklmnopqrst"), 0, 0, "1234567890", S("1234567890abcdefghijklmnopqrst"));
    test(S("abcdefghijklmnopqrst"), 0, 0, "12345678901234567890", S("12345678901234567890abcdefghijklmnopqrst"));
    test(S("abcdefghijklmnopqrst"), 0, 1, "", S("bcdefghijklmnopqrst"));
    test(S("abcdefghijklmnopqrst"), 0, 1, "12345", S("12345bcdefghijklmnopqrst"));
    test(S("abcdefghijklmnopqrst"), 0, 1, "1234567890", S("1234567890bcdefghijklmnopqrst"));
    test(S("abcdefghijklmnopqrst"), 0, 1, "12345678901234567890", S("12345678901234567890bcdefghijklmnopqrst"));
    test(S("abcdefghijklmnopqrst"), 0, 10, "", S("klmnopqrst"));
    test(S("abcdefghijklmnopqrst"), 0, 10, "12345", S("12345klmnopqrst"));
    test(S("abcdefghijklmnopqrst"), 0, 10, "1234567890", S("1234567890klmnopqrst"));
    test(S("abcdefghijklmnopqrst"), 0, 10, "12345678901234567890", S("12345678901234567890klmnopqrst"));
    test(S("abcdefghijklmnopqrst"), 0, 19, "", S("t"));
    test(S("abcdefghijklmnopqrst"), 0, 19, "12345", S("12345t"));
    test(S("abcdefghijklmnopqrst"), 0, 19, "1234567890", S("1234567890t"));
    test(S("abcdefghijklmnopqrst"), 0, 19, "12345678901234567890", S("12345678901234567890t"));
    // n1 == size(): the whole string is replaced.
    test(S("abcdefghijklmnopqrst"), 0, 20, "", S(""));
    test(S("abcdefghijklmnopqrst"), 0, 20, "12345", S("12345"));
    test(S("abcdefghijklmnopqrst"), 0, 20, "1234567890", S("1234567890"));
    test(S("abcdefghijklmnopqrst"), 0, 20, "12345678901234567890", S("12345678901234567890"));
    test(S("abcdefghijklmnopqrst"), 1, 0, "", S("abcdefghijklmnopqrst"));
    test(S("abcdefghijklmnopqrst"), 1, 0, "12345", S("a12345bcdefghijklmnopqrst"));
    test(S("abcdefghijklmnopqrst"), 1, 0, "1234567890", S("a1234567890bcdefghijklmnopqrst"));
    test(S("abcdefghijklmnopqrst"), 1, 0, "12345678901234567890", S("a12345678901234567890bcdefghijklmnopqrst"));
    test(S("abcdefghijklmnopqrst"), 1, 1, "", S("acdefghijklmnopqrst"));
    test(S("abcdefghijklmnopqrst"), 1, 1, "12345", S("a12345cdefghijklmnopqrst"));
    test(S("abcdefghijklmnopqrst"), 1, 1, "1234567890", S("a1234567890cdefghijklmnopqrst"));
    test(S("abcdefghijklmnopqrst"), 1, 1, "12345678901234567890", S("a12345678901234567890cdefghijklmnopqrst"));
    test(S("abcdefghijklmnopqrst"), 1, 9, "", S("aklmnopqrst"));
    test(S("abcdefghijklmnopqrst"), 1, 9, "12345", S("a12345klmnopqrst"));
    test(S("abcdefghijklmnopqrst"), 1, 9, "1234567890", S("a1234567890klmnopqrst"));
    test(S("abcdefghijklmnopqrst"), 1, 9, "12345678901234567890", S("a12345678901234567890klmnopqrst"));
    test(S("abcdefghijklmnopqrst"), 1, 18, "", S("at"));
    test(S("abcdefghijklmnopqrst"), 1, 18, "12345", S("a12345t"));
    test(S("abcdefghijklmnopqrst"), 1, 18, "1234567890", S("a1234567890t"));
    test(S("abcdefghijklmnopqrst"), 1, 18, "12345678901234567890", S("a12345678901234567890t"));
    test(S("abcdefghijklmnopqrst"), 1, 19, "", S("a"));
    test(S("abcdefghijklmnopqrst"), 1, 19, "12345", S("a12345"));
    test(S("abcdefghijklmnopqrst"), 1, 19, "1234567890", S("a1234567890"));
    test(S("abcdefghijklmnopqrst"), 1, 19, "12345678901234567890", S("a12345678901234567890"));
    test(S("abcdefghijklmnopqrst"), 10, 0, "", S("abcdefghijklmnopqrst"));
    test(S("abcdefghijklmnopqrst"), 10, 0, "12345", S("abcdefghij12345klmnopqrst"));
    test(S("abcdefghijklmnopqrst"), 10, 0, "1234567890", S("abcdefghij1234567890klmnopqrst"));
    test(S("abcdefghijklmnopqrst"), 10, 0, "12345678901234567890", S("abcdefghij12345678901234567890klmnopqrst"));
    test(S("abcdefghijklmnopqrst"), 10, 1, "", S("abcdefghijlmnopqrst"));
    test(S("abcdefghijklmnopqrst"), 10, 1, "12345", S("abcdefghij12345lmnopqrst"));
    test(S("abcdefghijklmnopqrst"), 10, 1, "1234567890", S("abcdefghij1234567890lmnopqrst"));
    test(S("abcdefghijklmnopqrst"), 10, 1, "12345678901234567890", S("abcdefghij12345678901234567890lmnopqrst"));
    test(S("abcdefghijklmnopqrst"), 10, 5, "", S("abcdefghijpqrst"));
    test(S("abcdefghijklmnopqrst"), 10, 5, "12345", S("abcdefghij12345pqrst"));
    test(S("abcdefghijklmnopqrst"), 10, 5, "1234567890", S("abcdefghij1234567890pqrst"));
    test(S("abcdefghijklmnopqrst"), 10, 5, "12345678901234567890", S("abcdefghij12345678901234567890pqrst"));
    test(S("abcdefghijklmnopqrst"), 10, 9, "", S("abcdefghijt"));
    test(S("abcdefghijklmnopqrst"), 10, 9, "12345", S("abcdefghij12345t"));
    test(S("abcdefghijklmnopqrst"), 10, 9, "1234567890", S("abcdefghij1234567890t"));
    test(S("abcdefghijklmnopqrst"), 10, 9, "12345678901234567890", S("abcdefghij12345678901234567890t"));
}
template <class S>
void test2()
{
    // Remaining replace(pos, n1, cstr) cases on the 20-character source:
    // the second half, the last character, and pure append at pos == size().
    test(S("abcdefghijklmnopqrst"), 10, 10, "", S("abcdefghij"));
    test(S("abcdefghijklmnopqrst"), 10, 10, "12345", S("abcdefghij12345"));
    test(S("abcdefghijklmnopqrst"), 10, 10, "1234567890", S("abcdefghij1234567890"));
    test(S("abcdefghijklmnopqrst"), 10, 10, "12345678901234567890", S("abcdefghij12345678901234567890"));
    test(S("abcdefghijklmnopqrst"), 19, 0, "", S("abcdefghijklmnopqrst"));
    test(S("abcdefghijklmnopqrst"), 19, 0, "12345", S("abcdefghijklmnopqrs12345t"));
    test(S("abcdefghijklmnopqrst"), 19, 0, "1234567890", S("abcdefghijklmnopqrs1234567890t"));
    test(S("abcdefghijklmnopqrst"), 19, 0, "12345678901234567890", S("abcdefghijklmnopqrs12345678901234567890t"));
    test(S("abcdefghijklmnopqrst"), 19, 1, "", S("abcdefghijklmnopqrs"));
    test(S("abcdefghijklmnopqrst"), 19, 1, "12345", S("abcdefghijklmnopqrs12345"));
    test(S("abcdefghijklmnopqrst"), 19, 1, "1234567890", S("abcdefghijklmnopqrs1234567890"));
    test(S("abcdefghijklmnopqrst"), 19, 1, "12345678901234567890", S("abcdefghijklmnopqrs12345678901234567890"));
    test(S("abcdefghijklmnopqrst"), 20, 0, "", S("abcdefghijklmnopqrst"));
    test(S("abcdefghijklmnopqrst"), 20, 0, "12345", S("abcdefghijklmnopqrst12345"));
    test(S("abcdefghijklmnopqrst"), 20, 0, "1234567890", S("abcdefghijklmnopqrst1234567890"));
    test(S("abcdefghijklmnopqrst"), 20, 0, "12345678901234567890", S("abcdefghijklmnopqrst12345678901234567890"));
}
int main(int, char**)
{
    // Run the full replace(pos, n1, cstr) matrix against std::string.
    {
    typedef std::string S;
    test0<S>();
    test1<S>();
    test2<S>();
    }
#if TEST_STD_VER >= 11
    // Repeat with a minimal C++11 allocator to verify the tests do not
    // depend on std::allocator specifics.
    {
    typedef std::basic_string<char, std::char_traits<char>, min_allocator<char>> S;
    test0<S>();
    test1<S>();
    test2<S>();
    }
#endif
    { // test replacing into self
    // Pass the string's own c_str() as the replacement source: each call
    // inserts a copy of the current contents at the beginning, so the
    // string doubles every time. Exercises aliasing handling in replace,
    // for both a short (SSO-sized) and a long (heap-backed) string.
    typedef std::string S;
    S s_short = "123/";
    S s_long  = "Lorem ipsum dolor sit amet, consectetur/";

    s_short.replace(s_short.begin(), s_short.begin(), s_short.c_str());
    assert(s_short == "123/123/");
    s_short.replace(s_short.begin(), s_short.begin(), s_short.c_str());
    assert(s_short == "123/123/123/123/");
    s_short.replace(s_short.begin(), s_short.begin(), s_short.c_str());
    assert(s_short == "123/123/123/123/123/123/123/123/");

    s_long.replace(s_long.begin(), s_long.begin(), s_long.c_str());
    assert(s_long == "Lorem ipsum dolor sit amet, consectetur/Lorem ipsum dolor sit amet, consectetur/");
    }

    return 0;
}
| {
"pile_set_name": "Github"
} |
// Copyright (c) Microsoft. All Rights Reserved. Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.
using System;
namespace Microsoft.CodeAnalysis.Host
{
    /// <summary>
    /// Per-language service for creating <see cref="Compilation"/> instances and
    /// inspecting compilation-backed metadata references.
    /// </summary>
    public interface ICompilationFactoryService : ILanguageService
    {
        /// <summary>Creates an empty compilation with the given assembly name and options.</summary>
        Compilation CreateCompilation(string assemblyName, CompilationOptions options);

        /// <summary>
        /// Creates a compilation for interactive submissions; <paramref name="hostObjectType"/>
        /// is the type whose members are visible to the submission's global scope.
        /// </summary>
        Compilation CreateSubmissionCompilation(string assemblyName, CompilationOptions options, Type hostObjectType);

        /// <summary>Returns the compilation behind a compilation-based metadata reference.</summary>
        Compilation GetCompilationFromCompilationReference(MetadataReference reference);

        /// <summary>True if <paramref name="reference"/> is a compilation reference (see
        /// <see cref="GetCompilationFromCompilationReference"/>).</summary>
        bool IsCompilationReference(MetadataReference reference);

        /// <summary>Returns this language's default <see cref="CompilationOptions"/>.</summary>
        CompilationOptions GetDefaultCompilationOptions();
    }
}
| {
"pile_set_name": "Github"
} |
#ifndef BOOST_MPL_SET_SET10_C_HPP_INCLUDED
#define BOOST_MPL_SET_SET10_C_HPP_INCLUDED

// Copyright Aleksey Gurtovoy 2000-2004
// Copyright David Abrahams 2003-2004
//
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//
// See http://www.boost.org/libs/mpl for documentation.

// $Id$
// $Date$
// $Revision$

// Defines set1_c..set10_c. Skip the component headers when this file is
// itself being run through the preprocessor to regenerate the
// pre-preprocessed variant.
#if !defined(BOOST_MPL_PREPROCESSING_MODE)
#   include <boost/mpl/set/set0_c.hpp>
#   include <boost/mpl/set/set10.hpp>
#endif

#include <boost/mpl/aux_/config/use_preprocessed.hpp>

#if !defined(BOOST_MPL_CFG_NO_PREPROCESSED_HEADERS) \
 && !defined(BOOST_MPL_PREPROCESSING_MODE)

// Fast path: pull in the pre-generated header instead of re-expanding
// the Boost.Preprocessor iteration below.
#   define BOOST_MPL_PREPROCESSED_HEADER set10_c.hpp
#   include <boost/mpl/set/aux_/include_preprocessed.hpp>

#else

#   include <boost/preprocessor/iterate.hpp>

namespace boost { namespace mpl {

// Generate setN_c for N in [1, 10] from the numbered template.
#   define BOOST_PP_ITERATION_PARAMS_1 \
    (3,(1, 10, <boost/mpl/set/aux_/numbered_c.hpp>))
#   include BOOST_PP_ITERATE()

}}

#endif // BOOST_MPL_CFG_NO_PREPROCESSED_HEADERS

#endif // BOOST_MPL_SET_SET10_C_HPP_INCLUDED
| {
"pile_set_name": "Github"
} |
package contenthash
import (
"context"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"runtime"
"strings"
"testing"
"time"
"github.com/containerd/containerd/content/local"
ctdmetadata "github.com/containerd/containerd/metadata"
"github.com/containerd/containerd/snapshots"
"github.com/containerd/containerd/snapshots/native"
"github.com/moby/buildkit/cache"
"github.com/moby/buildkit/cache/metadata"
"github.com/moby/buildkit/snapshot"
containerdsnapshot "github.com/moby/buildkit/snapshot/containerd"
"github.com/moby/buildkit/util/leaseutil"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"github.com/stretchr/testify/require"
"github.com/tonistiigi/fsutil"
fstypes "github.com/tonistiigi/fsutil/types"
bolt "go.etcd.io/bbolt"
)
const (
	// Hard-coded expected checksums shared by the tests below. The exact
	// values only need to stay stable across runs; dgstFileData0 is the
	// digest of a file with contents "data0", dgstDirD0 of the canonical
	// d0 directory fixture, and dgstDirD0Modified of d0 with its broken
	// symlink removed.
	dgstFileData0     = digest.Digest("sha256:cd8e75bca50f2d695f220d0cb0997d8ead387e4f926e8669a92d7f104cc9885b")
	dgstDirD0         = digest.Digest("sha256:d47454417d2c554067fbefe5f5719edc49f3cfe969c36b62e34a187a4da0cc9a")
	dgstDirD0Modified = digest.Digest("sha256:555ffa3028630d97ba37832b749eda85ab676fd64ffb629fbf0f4ec8c1e3bff1")
)
// TestChecksumSymlinkNoParentScan verifies that a path routed through a
// symlinked directory (aa/ln -> /aa) checksums to the target file's digest
// without requiring the parent directory to have been scanned first.
func TestChecksumSymlinkNoParentScan(t *testing.T) {
	t.Parallel()
	tmpdir, err := ioutil.TempDir("", "buildkit-state")
	require.NoError(t, err)
	defer os.RemoveAll(tmpdir)
	snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots"))
	require.NoError(t, err)
	cm, _ := setupCacheManager(t, tmpdir, "native", snapshotter)
	defer cm.Close()
	// changeStream DSL: "ADD <path> <kind> [data|target]".
	ch := []string{
		"ADD aa dir",
		"ADD aa/bb dir",
		"ADD aa/bb/cc dir",
		"ADD aa/bb/cc/dd file data0",
		"ADD aa/ln symlink /aa",
	}
	ref := createRef(t, cm, ch)
	cc, err := newCacheContext(ref.Metadata(), nil)
	require.NoError(t, err)
	// follow=true: the symlink component in the path is resolved.
	dgst, err := cc.Checksum(context.TODO(), ref, "aa/ln/bb/cc/dd", true)
	require.NoError(t, err)
	require.Equal(t, dgstFileData0, dgst)
}
// TestChecksumHardlinks verifies that hardlinks (">" targets in the change
// DSL) checksum identically to their source file, both when scanned from
// disk and when fed through HandleChange, and that updating one link's
// contents does not affect links that were not part of the change set.
func TestChecksumHardlinks(t *testing.T) {
	t.Parallel()
	tmpdir, err := ioutil.TempDir("", "buildkit-state")
	require.NoError(t, err)
	defer os.RemoveAll(tmpdir)
	snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots"))
	require.NoError(t, err)
	cm, _ := setupCacheManager(t, tmpdir, "native", snapshotter)
	defer cm.Close()
	ch := []string{
		"ADD abc dir",
		"ADD abc/foo file data0",
		"ADD ln file >/abc/foo",
		"ADD ln2 file >/abc/foo",
	}
	ref := createRef(t, cm, ch)
	cc, err := newCacheContext(ref.Metadata(), nil)
	require.NoError(t, err)
	// Source file and both hardlinks share the same digest.
	dgst, err := cc.Checksum(context.TODO(), ref, "abc/foo", false)
	require.NoError(t, err)
	require.Equal(t, dgstFileData0, dgst)
	dgst, err = cc.Checksum(context.TODO(), ref, "ln", false)
	require.NoError(t, err)
	require.Equal(t, dgstFileData0, dgst)
	dgst, err = cc.Checksum(context.TODO(), ref, "ln2", false)
	require.NoError(t, err)
	require.Equal(t, dgstFileData0, dgst)
	// validate same results with handleChange
	ref2 := createRef(t, cm, nil)
	cc2, err := newCacheContext(ref2.Metadata(), nil)
	require.NoError(t, err)
	err = emit(cc2.HandleChange, changeStream(ch))
	require.NoError(t, err)
	dgst, err = cc2.Checksum(context.TODO(), ref, "abc/foo", false)
	require.NoError(t, err)
	require.Equal(t, dgstFileData0, dgst)
	dgst, err = cc2.Checksum(context.TODO(), ref, "ln", false)
	require.NoError(t, err)
	require.Equal(t, dgstFileData0, dgst)
	dgst, err = cc2.Checksum(context.TODO(), ref, "ln2", false)
	require.NoError(t, err)
	require.Equal(t, dgstFileData0, dgst)
	// modify two of the links
	ch = []string{
		"ADD abc/foo file data1",
		"ADD ln file >/abc/foo",
	}
	// Reset the hardlink tracking map before replaying the new changes.
	cc2.linkMap = map[string][][]byte{}
	err = emit(cc2.HandleChange, changeStream(ch))
	require.NoError(t, err)
	data1Expected := "sha256:c2b5e234f5f38fc5864da7def04782f82501a40d46192e4207d5b3f0c3c4732b"
	// abc/foo and ln now hash as data1; ln2 was untouched and keeps data0.
	dgst, err = cc2.Checksum(context.TODO(), ref, "abc/foo", false)
	require.NoError(t, err)
	require.Equal(t, data1Expected, string(dgst))
	dgst, err = cc2.Checksum(context.TODO(), ref, "ln", false)
	require.NoError(t, err)
	require.Equal(t, data1Expected, string(dgst))
	dgst, err = cc2.Checksum(context.TODO(), ref, "ln2", false)
	require.NoError(t, err)
	require.Equal(t, dgstFileData0, dgst)
}
// TestChecksumWildcard verifies ChecksumWildcard: a pattern matching a
// single file yields that file's digest, a pattern matching several paths
// yields a combined digest, and symlinks are resolved when follow=true.
func TestChecksumWildcard(t *testing.T) {
	t.Parallel()
	tmpdir, err := ioutil.TempDir("", "buildkit-state")
	require.NoError(t, err)
	defer os.RemoveAll(tmpdir)
	snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots"))
	require.NoError(t, err)
	cm, _ := setupCacheManager(t, tmpdir, "native", snapshotter)
	defer cm.Close()
	ch := []string{
		"ADD bar file data1",
		"ADD foo file data0",
		"ADD fox file data2",
		"ADD x dir",
		"ADD x/d0 dir",
		"ADD x/d0/abc file data0",
		"ADD x/d0/def symlink abc",
		"ADD x/d0/ghi symlink nosuchfile",
		"ADD y1 symlink foo",
		"ADD y2 symlink fox",
	}
	ref := createRef(t, cm, ch)
	cc, err := newCacheContext(ref.Metadata(), nil)
	require.NoError(t, err)
	// "f*o" matches only foo.
	dgst, err := cc.ChecksumWildcard(context.TODO(), ref, "f*o", false)
	require.NoError(t, err)
	require.Equal(t, dgstFileData0, dgst)
	// "f*" matches foo and fox; combined digest differs from either file's.
	expFoos := digest.Digest("sha256:c9f914ad7ad8fe6092ce67484b43ca39c2087aabf9e4a1b223249b0f8b09b9f2")
	dgst, err = cc.ChecksumWildcard(context.TODO(), ref, "f*", false)
	require.NoError(t, err)
	require.Equal(t, expFoos, dgst)
	// "?" single-character wildcard selects the d0 directory.
	dgst, err = cc.ChecksumWildcard(context.TODO(), ref, "x/d?", false)
	require.NoError(t, err)
	require.Equal(t, dgstDirD0, dgst)
	// With follow=true, a matched symlink resolves to its target file.
	dgst, err = cc.ChecksumWildcard(context.TODO(), ref, "x/d?/def", true)
	require.NoError(t, err)
	require.Equal(t, dgstFileData0, dgst)
	// y1/y2 point at foo/fox, so following them reproduces the "f*" digest.
	dgst, err = cc.ChecksumWildcard(context.TODO(), ref, "y*", true)
	require.NoError(t, err)
	require.Equal(t, expFoos, dgst)
	err = ref.Release(context.TODO())
	require.NoError(t, err)
}
// TestChecksumWildcardWithBadMountable verifies that ChecksumWildcard
// surfaces an error when the mountable it is given cannot be mounted.
func TestChecksumWildcardWithBadMountable(t *testing.T) {
	t.Parallel()

	stateDir, err := ioutil.TempDir("", "buildkit-state")
	require.NoError(t, err)
	defer os.RemoveAll(stateDir)

	snap, err := native.NewSnapshotter(filepath.Join(stateDir, "snapshots"))
	require.NoError(t, err)

	manager, _ := setupCacheManager(t, stateDir, "native", snap)
	defer manager.Close()

	emptyRef := createRef(t, manager, nil)
	cacheCtx, err := newCacheContext(emptyRef.Metadata(), nil)
	require.NoError(t, err)

	// The broken mountable must cause the wildcard checksum to fail.
	_, err = cacheCtx.ChecksumWildcard(context.TODO(), newBadMountable(), "*", false)
	require.Error(t, err)
}
// TestSymlinksNoFollow verifies checksum behavior on symlinks themselves:
// with follow=false the link is hashed by its target string (same target
// string => same digest, regardless of whether the target exists), while
// follow=true on a broken link reports errNotFound.
func TestSymlinksNoFollow(t *testing.T) {
	t.Parallel()
	tmpdir, err := ioutil.TempDir("", "buildkit-state")
	require.NoError(t, err)
	defer os.RemoveAll(tmpdir)
	snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots"))
	require.NoError(t, err)
	cm, _ := setupCacheManager(t, tmpdir, "native", snapshotter)
	defer cm.Close()
	ch := []string{
		"ADD target file data0",
		"ADD sym symlink target",
		"ADD sym2 symlink target2",
		"ADD foo dir",
		"ADD foo/ghi symlink target",
		"ADD y1 symlink foo/ghi",
	}
	ref := createRef(t, cm, ch)
	cc, err := newCacheContext(ref.Metadata(), nil)
	require.NoError(t, err)
	expectedSym := digest.Digest("sha256:a2ba571981f48ec34eb79c9a3ab091b6491e825c2f7e9914ea86e8e958be7fae")
	// sym and foo/ghi both point at "target" => identical digests;
	// sym2 points at "target2" => different digest.
	dgst, err := cc.ChecksumWildcard(context.TODO(), ref, "sym", false)
	require.NoError(t, err)
	require.Equal(t, expectedSym, dgst)
	dgst, err = cc.ChecksumWildcard(context.TODO(), ref, "sym2", false)
	require.NoError(t, err)
	require.NotEqual(t, expectedSym, dgst)
	dgst, err = cc.ChecksumWildcard(context.TODO(), ref, "foo/ghi", false)
	require.NoError(t, err)
	require.Equal(t, expectedSym, dgst)
	_, err = cc.ChecksumWildcard(context.TODO(), ref, "foo/ghi", true) // same because broken symlink
	require.Error(t, err)
	require.Equal(t, true, errors.Is(err, errNotFound))
	_, err = cc.ChecksumWildcard(context.TODO(), ref, "y1", true)
	require.Error(t, err)
	require.Equal(t, true, errors.Is(err, errNotFound))
	// Checksum (non-wildcard) with follow=false behaves the same way.
	dgst, err = cc.Checksum(context.TODO(), ref, "sym", false)
	require.NoError(t, err)
	require.Equal(t, expectedSym, dgst)
	dgst, err = cc.Checksum(context.TODO(), ref, "foo/ghi", false)
	require.NoError(t, err)
	require.Equal(t, expectedSym, dgst)
	err = ref.Release(context.TODO())
	require.NoError(t, err)
}
// TestChecksumBasicFile covers the core Checksum paths: missing paths,
// plain files, files inside directories, followed and broken symlinks,
// whole-directory digests, digest stability when the same tree appears at
// a different root, and rescans triggered by querying an unscanned parent.
func TestChecksumBasicFile(t *testing.T) {
	t.Parallel()
	tmpdir, err := ioutil.TempDir("", "buildkit-state")
	require.NoError(t, err)
	defer os.RemoveAll(tmpdir)
	snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots"))
	require.NoError(t, err)
	cm, _ := setupCacheManager(t, tmpdir, "native", snapshotter)
	defer cm.Close()
	ch := []string{
		"ADD foo file data0",
		"ADD bar file data1",
		"ADD d0 dir",
		"ADD d0/abc file data0",
		"ADD d0/def symlink abc",
		"ADD d0/ghi symlink nosuchfile",
	}
	ref := createRef(t, cm, ch)
	// for the digest values, the actual values are not important in development
	// phase but consistency is
	cc, err := newCacheContext(ref.Metadata(), nil)
	require.NoError(t, err)
	// Nonexistent path errors out.
	_, err = cc.Checksum(context.TODO(), ref, "nosuch", true)
	require.Error(t, err)
	dgst, err := cc.Checksum(context.TODO(), ref, "foo", true)
	require.NoError(t, err)
	require.Equal(t, dgstFileData0, dgst)
	// second file returns different hash
	dgst, err = cc.Checksum(context.TODO(), ref, "bar", true)
	require.NoError(t, err)
	require.Equal(t, digest.Digest("sha256:c2b5e234f5f38fc5864da7def04782f82501a40d46192e4207d5b3f0c3c4732b"), dgst)
	// same file inside a directory
	dgst, err = cc.Checksum(context.TODO(), ref, "d0/abc", true)
	require.NoError(t, err)
	require.Equal(t, dgstFileData0, dgst)
	// repeat because codepath is different
	dgst, err = cc.Checksum(context.TODO(), ref, "d0/abc", true)
	require.NoError(t, err)
	require.Equal(t, dgstFileData0, dgst)
	// symlink to the same file is followed, returns same hash
	dgst, err = cc.Checksum(context.TODO(), ref, "d0/def", true)
	require.NoError(t, err)
	require.Equal(t, dgstFileData0, dgst)
	// Broken symlink with follow=true reports errNotFound.
	_, err = cc.Checksum(context.TODO(), ref, "d0/ghi", true)
	require.Error(t, err)
	require.Equal(t, true, errors.Is(err, errNotFound))
	// Whole-tree and whole-directory digests.
	dgst, err = cc.Checksum(context.TODO(), ref, "/", true)
	require.NoError(t, err)
	require.Equal(t, digest.Digest("sha256:427c9cf9ae98c0f81fb57a3076b965c7c149b6b0a85625ad4e884236649a42c6"), dgst)
	dgst, err = cc.Checksum(context.TODO(), ref, "d0", true)
	require.NoError(t, err)
	require.Equal(t, dgstDirD0, dgst)
	err = ref.Release(context.TODO())
	require.NoError(t, err)
	// this is same directory as previous d0: the same contents rooted at
	// "/" must reproduce the d0 directory digest.
	ch = []string{
		"ADD abc file data0",
		"ADD def symlink abc",
		"ADD ghi symlink nosuchfile",
	}
	ref = createRef(t, cm, ch)
	cc, err = newCacheContext(ref.Metadata(), nil)
	require.NoError(t, err)
	dgst, err = cc.Checksum(context.TODO(), ref, "/", true)
	require.NoError(t, err)
	require.Equal(t, dgstDirD0, dgst)
	err = ref.Release(context.TODO())
	require.NoError(t, err)
	// test that removing broken symlink changes hash even though symlink itself can't be checksummed
	ch = []string{
		"ADD abc file data0",
		"ADD def symlink abc",
	}
	ref = createRef(t, cm, ch)
	cc, err = newCacheContext(ref.Metadata(), nil)
	require.NoError(t, err)
	dgst, err = cc.Checksum(context.TODO(), ref, "/", true)
	require.NoError(t, err)
	require.Equal(t, dgstDirD0Modified, dgst)
	require.NotEqual(t, dgstDirD0, dgst)
	err = ref.Release(context.TODO())
	require.NoError(t, err)
	// test multiple scans, get checksum of nested file first
	ch = []string{
		"ADD abc dir",
		"ADD abc/aa dir",
		"ADD abc/aa/foo file data2",
		"ADD d0 dir",
		"ADD d0/abc file data0",
		"ADD d0/def symlink abc",
		"ADD d0/ghi symlink nosuchfile",
	}
	ref = createRef(t, cm, ch)
	cc, err = newCacheContext(ref.Metadata(), nil)
	require.NoError(t, err)
	dgst, err = cc.Checksum(context.TODO(), ref, "abc/aa/foo", true)
	require.NoError(t, err)
	require.Equal(t, digest.Digest("sha256:1c67653c3cf95b12a0014e2c4cd1d776b474b3218aee54155d6ae27b9b999c54"), dgst)
	require.NotEqual(t, dgstDirD0, dgst)
	// this will force rescan
	dgst, err = cc.Checksum(context.TODO(), ref, "d0", true)
	require.NoError(t, err)
	require.Equal(t, dgstDirD0, dgst)
	err = ref.Release(context.TODO())
	require.NoError(t, err)
}
// TestHandleChange verifies that a cache context populated purely through
// HandleChange (no disk scan) produces the same digests as scanning, and
// that DEL records update directory digests and make deleted paths report
// errNotFound.
func TestHandleChange(t *testing.T) {
	t.Parallel()
	tmpdir, err := ioutil.TempDir("", "buildkit-state")
	require.NoError(t, err)
	defer os.RemoveAll(tmpdir)
	snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots"))
	require.NoError(t, err)
	cm, _ := setupCacheManager(t, tmpdir, "native", snapshotter)
	defer cm.Close()
	ch := []string{
		"ADD foo file data0",
		"ADD bar file data1",
		"ADD d0 dir",
		"ADD d0/abc file data0",
		"ADD d0/def symlink abc",
		"ADD d0/ghi symlink nosuchfile",
	}
	// Note: the ref itself is created empty; all state comes from the
	// change stream below.
	ref := createRef(t, cm, nil)
	// for the digest values, the actual values are not important in development
	// phase but consistency is
	cc, err := newCacheContext(ref.Metadata(), nil)
	require.NoError(t, err)
	err = emit(cc.HandleChange, changeStream(ch))
	require.NoError(t, err)
	dgstFoo, err := cc.Checksum(context.TODO(), ref, "foo", true)
	require.NoError(t, err)
	require.Equal(t, dgstFileData0, dgstFoo)
	// symlink to the same file is followed, returns same hash
	dgst, err := cc.Checksum(context.TODO(), ref, "d0/def", true)
	require.NoError(t, err)
	require.Equal(t, dgstFoo, dgst)
	// symlink to the same file is followed, returns same hash
	dgst, err = cc.Checksum(context.TODO(), ref, "d0", true)
	require.NoError(t, err)
	require.Equal(t, dgstDirD0, dgst)
	// Deleting the broken symlink shifts the directory digest to the
	// "modified" fixture value.
	ch = []string{
		"DEL d0/ghi file",
	}
	err = emit(cc.HandleChange, changeStream(ch))
	require.NoError(t, err)
	dgst, err = cc.Checksum(context.TODO(), ref, "d0", true)
	require.NoError(t, err)
	require.Equal(t, dgstDirD0Modified, dgst)
	// Deleting the directory makes it and its children unresolvable.
	ch = []string{
		"DEL d0 dir",
	}
	err = emit(cc.HandleChange, changeStream(ch))
	require.NoError(t, err)
	_, err = cc.Checksum(context.TODO(), ref, "d0", true)
	require.Error(t, err)
	require.Equal(t, true, errors.Is(err, errNotFound))
	_, err = cc.Checksum(context.TODO(), ref, "d0/abc", true)
	require.Error(t, err)
	require.Equal(t, true, errors.Is(err, errNotFound))
	err = ref.Release(context.TODO())
	require.NoError(t, err)
}
// TestHandleRecursiveDir verifies that deleting a directory via
// HandleChange removes its whole subtree (even when the differ also emits
// redundant records for subdirectories), and that an equivalent tree built
// elsewhere afterwards digests the same.
func TestHandleRecursiveDir(t *testing.T) {
	t.Parallel()
	tmpdir, err := ioutil.TempDir("", "buildkit-state")
	require.NoError(t, err)
	defer os.RemoveAll(tmpdir)
	snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots"))
	require.NoError(t, err)
	cm, _ := setupCacheManager(t, tmpdir, "native", snapshotter)
	defer cm.Close()
	ch := []string{
		"ADD d0 dir",
		"ADD d0/foo dir",
		"ADD d0/foo/bar dir",
		"ADD d0/foo/bar/foo file data0",
		"ADD d0/foo/bar/bar file data1",
		"ADD d1 dir",
		"ADD d1/foo file data0",
	}
	ref := createRef(t, cm, nil)
	cc, err := newCacheContext(ref.Metadata(), nil)
	require.NoError(t, err)
	err = emit(cc.HandleChange, changeStream(ch))
	require.NoError(t, err)
	dgst, err := cc.Checksum(context.TODO(), ref, "d0/foo/bar", true)
	require.NoError(t, err)
	ch = []string{
		"DEL d0 dir",
		"DEL d0/foo dir", // the differ can produce a record for subdir as well
		"ADD d1/bar file data1",
	}
	err = emit(cc.HandleChange, changeStream(ch))
	require.NoError(t, err)
	// d1 now contains {foo: data0, bar: data1}, the same contents
	// d0/foo/bar had before the delete, so the digests must match.
	dgst2, err := cc.Checksum(context.TODO(), ref, "d1", true)
	require.NoError(t, err)
	require.Equal(t, dgst2, dgst)
	// Root checksum must still succeed after the recursive delete.
	_, err = cc.Checksum(context.TODO(), ref, "", true)
	require.NoError(t, err)
}
// TestChecksumUnorderedFiles verifies that sibling entries whose names sort
// differently as raw bytes vs. path components ("foo", "foo-subdir",
// "foo.subdir") are all tracked and contribute to the directory digest.
func TestChecksumUnorderedFiles(t *testing.T) {
	t.Parallel()
	tmpdir, err := ioutil.TempDir("", "buildkit-state")
	require.NoError(t, err)
	defer os.RemoveAll(tmpdir)
	snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots"))
	require.NoError(t, err)
	cm, _ := setupCacheManager(t, tmpdir, "native", snapshotter)
	defer cm.Close()
	ch := []string{
		"ADD d0 dir",
		"ADD d0/foo dir",
		"ADD d0/foo/bar file data0",
		"ADD d0/foo-subdir dir",
		"ADD d0/foo.subdir file data1",
	}
	ref := createRef(t, cm, nil)
	cc, err := newCacheContext(ref.Metadata(), nil)
	require.NoError(t, err)
	err = emit(cc.HandleChange, changeStream(ch))
	require.NoError(t, err)
	dgst, err := cc.Checksum(context.TODO(), ref, "d0", true)
	require.NoError(t, err)
	require.Equal(t, dgst, digest.Digest("sha256:14276c302c940a80f82ca5477bf766c98a24702d6a9948ee71bb277cdad3ae05"))
	// check regression from earier version that didn't track some files:
	// dropping foo-subdir and foo.subdir must change the digest.
	ch = []string{
		"ADD d0 dir",
		"ADD d0/foo dir",
		"ADD d0/foo/bar file data0",
	}
	ref = createRef(t, cm, nil)
	cc, err = newCacheContext(ref.Metadata(), nil)
	require.NoError(t, err)
	err = emit(cc.HandleChange, changeStream(ch))
	require.NoError(t, err)
	dgst2, err := cc.Checksum(context.TODO(), ref, "d0", true)
	require.NoError(t, err)
	require.NotEqual(t, dgst, dgst2)
}
// TestSymlinkInPathScan verifies that the package-level Checksum resolves a
// symlinked intermediate path component (d0/def -> sub) during a disk scan,
// and that a repeated query returns the same digest (cached path).
func TestSymlinkInPathScan(t *testing.T) {
	t.Parallel()
	tmpdir, err := ioutil.TempDir("", "buildkit-state")
	require.NoError(t, err)
	defer os.RemoveAll(tmpdir)
	snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots"))
	require.NoError(t, err)
	cm, _ := setupCacheManager(t, tmpdir, "native", snapshotter)
	defer cm.Close()
	ch := []string{
		"ADD d0 dir",
		"ADD d0/sub dir",
		"ADD d0/sub/foo file data0",
		"ADD d0/def symlink sub",
	}
	ref := createRef(t, cm, ch)
	dgst, err := Checksum(context.TODO(), ref, "d0/def/foo", true)
	require.NoError(t, err)
	require.Equal(t, dgstFileData0, dgst)
	dgst, err = Checksum(context.TODO(), ref, "d0/def/foo", true)
	require.NoError(t, err)
	require.Equal(t, dgstFileData0, dgst)
	err = ref.Release(context.TODO())
	require.NoError(t, err)
}
// TestSymlinkNeedsScan verifies that resolving a relative symlink whose
// target (../../c0/sub) lies outside an already-scanned subtree triggers
// the additional scan needed to produce the target file's digest.
func TestSymlinkNeedsScan(t *testing.T) {
	t.Parallel()
	tmpdir, err := ioutil.TempDir("", "buildkit-state")
	require.NoError(t, err)
	defer os.RemoveAll(tmpdir)
	snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots"))
	require.NoError(t, err)
	cm, _ := setupCacheManager(t, tmpdir, "native", snapshotter)
	defer cm.Close()
	ch := []string{
		"ADD c0 dir",
		"ADD c0/sub dir",
		"ADD c0/sub/foo file data0",
		"ADD d0 dir",
		"ADD d0/d1 dir",
		"ADD d0/d1/def symlink ../../c0/sub",
	}
	ref := createRef(t, cm, ch)
	// scan the d0 path containing the symlink that doesn't get followed
	_, err = Checksum(context.TODO(), ref, "d0/d1", true)
	require.NoError(t, err)
	dgst, err := Checksum(context.TODO(), ref, "d0/d1/def/foo", true)
	require.NoError(t, err)
	require.Equal(t, dgstFileData0, dgst)
	err = ref.Release(context.TODO())
	require.NoError(t, err)
}
// TestSymlinkAbsDirSuffix verifies that an absolute symlink target with a
// trailing slash ("/c0/sub/") still resolves through to files beneath it.
func TestSymlinkAbsDirSuffix(t *testing.T) {
	t.Parallel()

	stateDir, err := ioutil.TempDir("", "buildkit-state")
	require.NoError(t, err)
	defer os.RemoveAll(stateDir)

	snap, err := native.NewSnapshotter(filepath.Join(stateDir, "snapshots"))
	require.NoError(t, err)

	manager, _ := setupCacheManager(t, stateDir, "native", snap)
	defer manager.Close()

	changes := []string{
		"ADD c0 dir",
		"ADD c0/sub dir",
		"ADD c0/sub/foo file data0",
		"ADD link symlink /c0/sub/",
	}
	r := createRef(t, manager, changes)

	// link/foo must resolve to c0/sub/foo despite the trailing slash.
	sum, err := Checksum(context.TODO(), r, "link/foo", true)
	require.NoError(t, err)
	require.Equal(t, dgstFileData0, sum)

	require.NoError(t, r.Release(context.TODO()))
}
// TestSymlinkThroughParent verifies that many spellings of a symlink to the
// same directory (absolute, trailing slash, trailing ".", "..", relative,
// and a chain through a second symlink) all resolve to the same file digest.
func TestSymlinkThroughParent(t *testing.T) {
	t.Parallel()
	tmpdir, err := ioutil.TempDir("", "buildkit-state")
	require.NoError(t, err)
	defer os.RemoveAll(tmpdir)
	snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots"))
	require.NoError(t, err)
	cm, _ := setupCacheManager(t, tmpdir, "native", snapshotter)
	defer cm.Close()
	ch := []string{
		"ADD lib dir",
		"ADD lib/sub dir",
		"ADD lib/sub/foo file data0",
		"ADD lib/sub/link symlink ../../lib2",
		"ADD lib2 dir",
		"ADD lib2/sub dir",
		"ADD lib2/sub/foo file data0",
		"ADD link1 symlink /lib",
		"ADD link2 symlink /lib/",
		"ADD link3 symlink /lib/.",
		"ADD link4 symlink /lib/../lib",
		"ADD link5 symlink ../lib",
	}
	ref := createRef(t, cm, ch)
	dgst, err := Checksum(context.TODO(), ref, "link1/sub/foo", true)
	require.NoError(t, err)
	require.Equal(t, dgstFileData0, dgst)
	dgst, err = Checksum(context.TODO(), ref, "link2/sub/foo", true)
	require.NoError(t, err)
	require.Equal(t, dgstFileData0, dgst)
	dgst, err = Checksum(context.TODO(), ref, "link3/sub/foo", true)
	require.NoError(t, err)
	require.Equal(t, dgstFileData0, dgst)
	dgst, err = Checksum(context.TODO(), ref, "link4/sub/foo", true)
	require.NoError(t, err)
	require.Equal(t, dgstFileData0, dgst)
	dgst, err = Checksum(context.TODO(), ref, "link5/sub/foo", true)
	require.NoError(t, err)
	require.Equal(t, dgstFileData0, dgst)
	// Chained: link1 -> /lib, then lib/sub/link -> ../../lib2.
	dgst, err = Checksum(context.TODO(), ref, "link1/sub/link/sub/foo", true)
	require.NoError(t, err)
	require.Equal(t, dgstFileData0, dgst)
	err = ref.Release(context.TODO())
	require.NoError(t, err)
}
// TestSymlinkInPathHandleChange verifies symlink resolution inside paths
// for a cache context populated via HandleChange: relative links, absolute
// links, and links that escape above the root ("../../../link") must all
// resolve to the d0 directory fixture digest.
//
// Fix: the original declared `dgstFileData0, err := ...` locals, shadowing
// the package-level const dgstFileData0 with directory digests, and passed
// the actual value in the "expected" argument slot of require.Equal. The
// shadowing locals are removed (reusing dgst) and the expected value
// (dgstDirD0) is passed first.
func TestSymlinkInPathHandleChange(t *testing.T) {
	t.Parallel()
	tmpdir, err := ioutil.TempDir("", "buildkit-state")
	require.NoError(t, err)
	defer os.RemoveAll(tmpdir)
	snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots"))
	require.NoError(t, err)
	cm, _ := setupCacheManager(t, tmpdir, "native", snapshotter)
	defer cm.Close()
	ch := []string{
		"ADD d1 dir",
		"ADD d1/sub dir",
		"ADD d1/sub/foo file data0",
		"ADD d1/sub/bar symlink /link",
		"ADD d1/sub/baz symlink ../../../link",
		"ADD d1/sub/bay symlink ../../../../link/.", // weird link
		"ADD d1/def symlink sub",
		"ADD sub dir",
		"ADD sub/d0 dir",
		"ADD sub/d0/abc file data0",
		"ADD sub/d0/def symlink abc",
		"ADD sub/d0/ghi symlink nosuchfile",
		"ADD link symlink sub/d0",
	}
	ref := createRef(t, cm, nil)
	cc, err := newCacheContext(ref.Metadata(), nil)
	require.NoError(t, err)
	err = emit(cc.HandleChange, changeStream(ch))
	require.NoError(t, err)
	dgst, err := cc.Checksum(context.TODO(), ref, "d1/def/foo", true)
	require.NoError(t, err)
	require.Equal(t, dgstFileData0, dgst)
	dgst, err = cc.Checksum(context.TODO(), ref, "d1/def/bar/abc", true)
	require.NoError(t, err)
	require.Equal(t, dgstFileData0, dgst)
	// All of the following resolve to the sub/d0 directory fixture.
	dgst, err = cc.Checksum(context.TODO(), ref, "sub/d0", true)
	require.NoError(t, err)
	require.Equal(t, dgstDirD0, dgst)
	dgst, err = cc.Checksum(context.TODO(), ref, "d1/def/baz", true)
	require.NoError(t, err)
	require.Equal(t, dgstDirD0, dgst)
	dgst, err = cc.Checksum(context.TODO(), ref, "d1/def/bay", true)
	require.NoError(t, err)
	require.Equal(t, dgstDirD0, dgst)
	dgst, err = cc.Checksum(context.TODO(), ref, "link", true)
	require.NoError(t, err)
	require.Equal(t, dgstDirD0, dgst)
	err = ref.Release(context.TODO())
	require.NoError(t, err)
}
// TestPersistence verifies that computed checksums survive a full close and
// reopen of the cache manager: the same ref, fetched by id after restart,
// must yield the same digest.
func TestPersistence(t *testing.T) {
	t.Parallel()
	tmpdir, err := ioutil.TempDir("", "buildkit-state")
	require.NoError(t, err)
	defer os.RemoveAll(tmpdir)
	snapshotter, err := native.NewSnapshotter(filepath.Join(tmpdir, "snapshots"))
	require.NoError(t, err)
	cm, closeBolt := setupCacheManager(t, tmpdir, "native", snapshotter)
	defer cm.Close()
	ch := []string{
		"ADD foo file data0",
		"ADD bar file data1",
		"ADD d0 dir",
		"ADD d0/abc file data0",
		"ADD d0/def symlink abc",
		"ADD d0/ghi symlink nosuchfile",
	}
	ref := createRef(t, cm, ch)
	id := ref.ID()
	dgst, err := Checksum(context.TODO(), ref, "foo", true)
	require.NoError(t, err)
	require.Equal(t, dgstFileData0, dgst)
	err = ref.Release(context.TODO())
	require.NoError(t, err)
	// re-acquire the same ref by id while the manager is still open
	ref, err = cm.Get(context.TODO(), id)
	require.NoError(t, err)
	dgst, err = Checksum(context.TODO(), ref, "foo", true)
	require.NoError(t, err)
	require.Equal(t, dgstFileData0, dgst)
	err = ref.Release(context.TODO())
	require.NoError(t, err)
	time.Sleep(100 * time.Millisecond) // saving happens on the background
	// we can't close snapshotter and open it twice (especially, its internal bbolt store)
	cm.Close()
	closeBolt()
	// drop the in-memory checksum cache so the next lookup must hit disk
	getDefaultManager().lru.Purge()
	cm, closeBolt = setupCacheManager(t, tmpdir, "native", snapshotter)
	defer closeBolt()
	defer cm.Close()
	ref, err = cm.Get(context.TODO(), id)
	require.NoError(t, err)
	dgst, err = Checksum(context.TODO(), ref, "foo", true)
	require.NoError(t, err)
	require.Equal(t, dgstFileData0, dgst)
}
// createRef makes a new committed immutable ref and populates it with the
// given textual changes by mounting the ref and writing to the mounted
// filesystem. A non-empty file list is skipped on Windows because bind
// mounts are unavailable there.
func createRef(t *testing.T, cm cache.Manager, files []string) cache.ImmutableRef {
	if runtime.GOOS == "windows" && len(files) > 0 {
		// lm.Mount() will fail
		t.Skip("Depends on unimplemented containerd bind-mount support on Windows")
	}
	mref, err := cm.New(context.TODO(), nil, cache.CachePolicyRetain)
	require.NoError(t, err)
	mounts, err := mref.Mount(context.TODO(), false)
	require.NoError(t, err)
	lm := snapshot.LocalMounter(mounts)
	mp, err := lm.Mount()
	require.NoError(t, err)
	err = writeChanges(mp, changeStream(files))
	// unmount before asserting so a failed write doesn't leak the mount
	lm.Unmount()
	require.NoError(t, err)
	ref, err := mref.Commit(context.TODO())
	require.NoError(t, err)
	return ref
}
// setupCacheManager wires up a cache.Manager backed by the given snapshotter
// and a bolt-based containerd metadata store under tmpdir. The returned
// closure closes the underlying bolt database (the caller is responsible for
// closing the manager itself).
func setupCacheManager(t *testing.T, tmpdir string, snapshotterName string, snapshotter snapshots.Snapshotter) (cache.Manager, func()) {
	md, err := metadata.NewStore(filepath.Join(tmpdir, "metadata.db"))
	require.NoError(t, err)
	store, err := local.NewStore(tmpdir)
	require.NoError(t, err)
	db, err := bolt.Open(filepath.Join(tmpdir, "containerdmeta.db"), 0644, nil)
	require.NoError(t, err)
	mdb := ctdmetadata.NewDB(db, store, map[string]snapshots.Snapshotter{
		snapshotterName: snapshotter,
	})
	cm, err := cache.NewManager(cache.ManagerOpt{
		Snapshotter:    snapshot.FromContainerdSnapshotter(snapshotterName, containerdsnapshot.NSSnapshotter("buildkit", mdb.Snapshotter(snapshotterName)), nil),
		MetadataStore:  md,
		LeaseManager:   leaseutil.WithNamespace(ctdmetadata.NewLeaseManager(mdb), "buildkit"),
		ContentStore:   mdb.ContentStore(),
		GarbageCollect: mdb.GarbageCollect,
	})
	require.NoError(t, err)
	return cm, func() {
		db.Close()
	}
}
// badMountable is a cache.Mountable whose Mount always fails.
type badMountable struct{}

func (bm *badMountable) Mount(ctx context.Context, readonly bool) (snapshot.Mountable, error) {
	return nil, errors.New("tried to mount bad mountable")
}

// newBadMountable returns a cache.Mountable that will fail to mount, for use in APIs
// that require a Mountable, but which should never actually try to access the filesystem.
func newBadMountable() cache.Mountable {
	return &badMountable{}
}
// these test helpers are from tonistiigi/fsutil
// change describes a single filesystem mutation used as test fixture input.
type change struct {
	kind fsutil.ChangeKind // add / modify / delete
	path string            // path relative to the root
	fi   os.FileInfo       // stat info; Sys() yields *fstypes.Stat
	data string            // inline file contents, if any
}
// changeStream parses each textual change description (see parseChange for
// the accepted grammar) into a *change, preserving order.
func changeStream(dt []string) []*change {
	parsed := make([]*change, 0, len(dt))
	for _, line := range dt {
		parsed = append(parsed, parseChange(line))
	}
	return parsed
}
// parseChange parses one fixture line of the form "KIND path type [extra]",
// e.g. "ADD d0/foo file data0" or "ADD d0/def symlink abc". KIND is one of
// ADD/CHG/DEL; type is file/dir/symlink. For files, a 4th field starting
// with '>' marks a hardlink target, otherwise it is inline data. Malformed
// input panics, which is acceptable for test fixtures.
func parseChange(str string) *change {
	f := strings.Fields(str)
	errStr := fmt.Sprintf("invalid change %q", str)
	if len(f) < 3 {
		panic(errStr)
	}
	c := &change{}
	switch f[0] {
	case "ADD":
		c.kind = fsutil.ChangeKindAdd
	case "CHG":
		c.kind = fsutil.ChangeKindModify
	case "DEL":
		c.kind = fsutil.ChangeKindDelete
	default:
		panic(errStr)
	}
	c.path = f[1]
	st := &fstypes.Stat{}
	switch f[2] {
	case "file":
		if len(f) > 3 {
			if f[3][0] == '>' {
				// ">target" denotes a hardlink to target
				st.Linkname = f[3][1:]
			} else {
				c.data = f[3]
				st.Size_ = int64(len(f[3]))
			}
		}
		st.Mode |= 0644
	case "dir":
		st.Mode |= uint32(os.ModeDir)
		st.Mode |= 0755
	case "symlink":
		if len(f) < 4 {
			panic(errStr)
		}
		st.Mode |= uint32(os.ModeSymlink)
		st.Linkname = f[3]
		st.Mode |= 0777
	}
	c.fi = &fsutil.StatInfo{Stat: st}
	return c
}
// emit feeds each change to fn (typically cacheContext.HandleChange). For
// non-delete changes, the FileInfo is wrapped in withHash so the receiver
// can obtain a precomputed content digest via its Digest() method.
func emit(fn fsutil.HandleChangeFn, inp []*change) error {
	for _, c := range inp {
		stat, ok := c.fi.Sys().(*fstypes.Stat)
		if !ok {
			return errors.Errorf("invalid non-stat change %s", c.fi.Name())
		}
		fi := c.fi
		if c.kind != fsutil.ChangeKindDelete {
			h, err := NewFromStat(stat)
			if err != nil {
				return err
			}
			// hash the inline data (empty for dirs/symlinks)
			if _, err := io.Copy(h, strings.NewReader(c.data)); err != nil {
				return err
			}
			fi = &withHash{FileInfo: c.fi, digest: digest.NewDigest(digest.SHA256, h)}
		}
		if err := fn(c.kind, c.path, fi, nil); err != nil {
			return err
		}
	}
	return nil
}
// withHash decorates an os.FileInfo with a precomputed content digest.
type withHash struct {
	digest digest.Digest
	os.FileInfo
}

// Digest returns the precomputed digest of the wrapped file's contents.
func (wh *withHash) Digest() digest.Digest {
	return wh.digest
}
// writeChanges materializes a list of ADD changes under root on the real
// filesystem. Directories, symlinks, hardlinks (a non-empty Stat.Linkname on
// a regular file) and regular files with inline data are supported; non-ADD
// changes are ignored.
//
// NOTE(fix): the original leaked the file handle when Write failed (early
// return without Close) and ignored the error from Close, which can hide
// short writes on some filesystems.
func writeChanges(root string, inp []*change) error {
	for _, c := range inp {
		if c.kind != fsutil.ChangeKindAdd {
			continue
		}
		p := filepath.Join(root, c.path)
		stat, ok := c.fi.Sys().(*fstypes.Stat)
		if !ok {
			return errors.Errorf("invalid non-stat change %s", p)
		}
		switch {
		case c.fi.IsDir():
			// The snapshot root ('/') is always created with 0755.
			// We use the same permission mode here.
			if err := os.Mkdir(p, 0755); err != nil {
				return errors.WithStack(err)
			}
		case c.fi.Mode()&os.ModeSymlink != 0:
			if err := os.Symlink(stat.Linkname, p); err != nil {
				return errors.WithStack(err)
			}
		case len(stat.Linkname) > 0:
			// hardlink; relative link targets are resolved against p's dir
			link := filepath.Join(root, stat.Linkname)
			if !filepath.IsAbs(link) {
				link = filepath.Join(filepath.Dir(p), stat.Linkname)
			}
			if err := os.Link(link, p); err != nil {
				return errors.WithStack(err)
			}
		default:
			f, err := os.Create(p)
			if err != nil {
				return errors.WithStack(err)
			}
			if len(c.data) > 0 {
				if _, err := f.Write([]byte(c.data)); err != nil {
					f.Close() // best effort; the write error takes precedence
					return errors.WithStack(err)
				}
			}
			if err := f.Close(); err != nil {
				return errors.WithStack(err)
			}
		}
	}
	return nil
}
| {
"pile_set_name": "Github"
} |
<?php
/*
* This file is part of PHPExifTool.
*
* (c) 2012 Romain Neutron <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
namespace PHPExiftool\Driver\Tag\XMPPur;
use JMS\Serializer\Annotation\ExclusionPolicy;
use PHPExiftool\Driver\AbstractTag;
/**
* @ExclusionPolicy("all")
*/
/**
 * Generated ExifTool tag descriptor for the XMP-pur "Agreement" tag.
 * Values below mirror ExifTool's tag table metadata; do not edit by hand.
 */
class Agreement extends AbstractTag
{

    protected $Id = 'agreement';

    protected $Name = 'Agreement';

    protected $FullName = 'XMP::pur';

    protected $GroupName = 'XMP-pur';

    protected $g0 = 'XMP';

    protected $g1 = 'XMP-pur';

    protected $g2 = 'Document';

    protected $Type = 'string';

    protected $Writable = true;

    protected $Description = 'Agreement';

    // ExifTool flags: avoided by default, list-valued (Bag)
    protected $flag_Avoid = true;

    protected $flag_List = true;

    protected $flag_Bag = true;

}
| {
"pile_set_name": "Github"
} |
---
title: "eM Client"
permalink: /software/emclient/
excerpt: "Email Encryption"
modified: 2017-09-27T09:00:00-00:00
---
eM Client is a versatile email client with calendar, tasks and chat. It supports both PGP/MIME and traditional armored PGP and focuses on ease of use. eM Client synchronizes with Gmail, Office 365, Exchange and any IMAP/POP3/CalDAV/CardDAV/XMPP server.
### Key Facts
* Developer/Publisher: [eM Client s.r.o.](https://www.emclient.com/)
* License: Proprietary (Closed Source)
* Price: eM Client offers two types of licenses:
  * Free license for non-commercial use, limited to two accounts
  * Commercial license for 49.95 USD
* Web: [https://www.emclient.com](https://www.emclient.com)
* Help:
* [Forum](https://forum.emclient.com/emclient) for Free license users
* [Pro support](https://support.emclient.com/) for commercial license users
* [FAQ](https://www.emclient.com/faq-getting-started) | {
"pile_set_name": "Github"
} |
from .data_init import (
db_operate, convert_timescaledb, init_services, init_resources,
init_default_roles, update_default_roles, init_admin_account,
init_dict_code, init_system_info, init_lwm2m_info, create_triggers
)
from .supervisord import supervisord_config
__all__ = ['ProjectManage']
class ProjectManage:
    """Entry points for first-time deployment and in-place upgrade.

    Each entry point runs a fixed, ordered sequence of initialization steps;
    the order matters (e.g. the database must exist before services/resources
    are registered).
    """

    @staticmethod
    def project_deploy():
        """Run the full first-time deployment sequence."""
        steps = (
            lambda: db_operate(execute_type='deploy'),
            convert_timescaledb,
            create_triggers,
            init_services,
            init_resources,
            init_default_roles,
            init_admin_account,
            init_dict_code,
            init_system_info,
            init_lwm2m_info,
            supervisord_config,
        )
        for step in steps:
            step()

    @staticmethod
    def project_upgrade():
        """Run the upgrade sequence against an existing installation."""
        steps = (
            lambda: db_operate(execute_type='upgrade'),
            init_services,
            init_resources,
            update_default_roles,
            init_dict_code,
            init_system_info,
            init_lwm2m_info,
            supervisord_config,
        )
        for step in steps:
            step()
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="UTF-8"?>
<!--
/**
* Copyright © Magento, Inc. All rights reserved.
* See COPYING.txt for license details.
*/
-->
<actionGroups xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:noNamespaceSchemaLocation="urn:magento:mftf:Test/etc/actionGroupSchema.xsd">
<actionGroup name="AdminOpenMediaGalleryFromCategoryImageUploaderActionGroup">
<annotations>
<description>Opens Enhanced MediaGallery from image uploader on category page</description>
</annotations>
<conditionalClick stepKey="clickExpandContent" selector="{{AdminCategoryContentSection.sectionHeader}}" dependentSelector="{{AdminCategoryContentSection.selectFromGalleryButton}}" visible="false" />
<waitForElementVisible selector="{{AdminCategoryContentSection.selectFromGalleryButton}}" stepKey="waitForSelectFromGallery" />
<click selector="{{AdminCategoryContentSection.selectFromGalleryButton}}" stepKey="clickSelectFromGallery" />
<waitForPageLoad stepKey="waitForPageLoad"/>
</actionGroup>
</actionGroups>
| {
"pile_set_name": "Github"
} |
#!/bin/bash
# Packages the built web dist into the static/ layout used by the mobile app
# and rewrites index.html to reference the generated js/css bundles.

DIST=../../web/dist
RSC=$DIST/static

# resources from web dist
#IMG=$RSC/img
JS=$RSC/js
CSS=$RSC/css
FONTS=$RSC/fonts
IMG=$RSC/img

# Move any previously generated static/ dir into a timestamped backup folder.
backup() {
# bak_files="img js css fonts"
    bak_files="static"
    dir=backup-$(date +%y_%m_%d-%H:%M:%S)
    mkdir $dir
    mv $bak_files $dir 2>/dev/null
}

# Copy the freshly built css/js/fonts/img assets out of the web dist.
update_web_files() {
    mkdir static
    mkdir static/css
    cp -rf $CSS/*.css static/css
    mkdir static/js
    cp -rf $JS/*.js static/js
    mkdir static/fonts
    cp -rf $FONTS/*.woff2 static/fonts
    cp -rf $IMG static/
    rm -f static/img/*.png
}

# Rewrite absolute /static/ asset urls inside the css to relative paths.
modify_css() {
    css_file=$(cd static/css && ls *.css)
    if [[ -z $css_file ]]; then
        echo modify_css: no css file
        exit 2
    fi
    echo modify css
    sed -i "s|/static/|../|g" static/css/$css_file
}

# Strip all <script src=...> tags from index.html.
del_js_link() {
    index=index.html
    sed -i '/<script type="text\/javascript" src="/d' $index
}

# Strip all generated css <link> tags from index.html.
del_css_link() {
    index=index.html
    sed -i '/<link href="static\/css/d' $index
}

# Insert a <script> tag for the (hash-named) bundle matching $1 before </body>.
add_js_link() {
    pattern=$1
    js=$(cd static/js && ls $pattern*.js)
    if [[ -z $js ]]; then
        echo add_js_link: no $pattern js file
        exit 3
    fi
    link="<script type=\"text/javascript\" src=\"static/js/$js\"></script>"
    sed -i "/<\/body>/ i $link" $index
}

# Insert a <link> tag for the stylesheet matching $1 after </title>.
add_css_link(){
    pattern=$1
    css=$(cd static/css && ls $pattern*.css)
    if [[ -z $css ]]; then
        echo add_css_link: no $pattern css file
        exit 4
    fi
    link="<link href=\"static/css/$css\" rel=\"stylesheet\">"
    sed -i "/<\/title>/ a $link" $index
}

# Regenerate the js/css references in index.html from scratch.
# Note: manifest must load before vendor, vendor before app.
modify_index() {
    index=index.html
    echo regenerate js link in $index
    del_js_link
    add_js_link manifest
    add_js_link vendor
    add_js_link app
    echo regenerate css link in $index
    del_css_link
    add_css_link app
}

# Full pipeline: back up old output, copy assets, fix css urls, fix index.html.
generate() {
    backup
    echo get web dist files
    update_web_files
    modify_css
    modify_index
}

# Remove everything this script generated and the links it inserted.
clear_generated_files() {
    echo clear autogen files
    rm -rf static
    rm -rf backup-*
    echo clear links in index.html
    del_js_link
    del_css_link
}

show_help() {
    echo "
Usage: $0 [option]
Options:
gen: Generate files taht used to build mobile APP, it requires web static files in $DIST
clear: Clear all files generated automatically
help: Show current msg
Example:
1. $0 gen
- generate files to build mobile APP
2. $0 clear
- clear files
3. $0 help
- show help info
"
}

# Dispatch on the first command-line argument.
case $1 in
    gen | generate )
        generate
        exit $?
        ;;
    clear )
        echo clear
        clear_generated_files
        exit $?
        ;;
    * )
        show_help
        exit $?
esac
| {
"pile_set_name": "Github"
} |
from __future__ import print_function
import os
from time import time
import sys
import branding
import common
import hooks
import fileutil
import metrics
import protocols
import social
import string
import threading
import traceback
import simplejson
from rpc.jsonrpc import Dsuccess
from social.network import SocialFeed
from util import callbacks
from util.net import UrlQuery
from util.primitives.error_handling import traceguard
from util.primitives.funcs import Delegate
from util.primitives.mapping import Storage, to_storage
from path import path
from copy import deepcopy
import gui.infobox.providers as gui_providers
import gui.infobox.interfaces as gui_interfaces
import wx.webview
from oauth.oauth import OAuthToken
from logging import getLogger; log = getLogger('twitter')
import twitter_auth
#import twitter_xauth as twitter_auth
USE_REALTIME_STREAM = False or '--realtime' in sys.argv
SHOW_INVITE_DM_DIALOG_ON_CREATE = False
RES_PATH = (path(__file__).parent.abspath() / 'res').normpath()
APP_PATH = (RES_PATH / 'app.html').normpath()
#from twitter_tweets import DIGSBY_TWEET_MESSAGE
def LINK():
    """Branded URL for the Twitter achievements feature, with a bit.ly fallback."""
    fallback = 'http://bit.ly/r2d24u'
    return branding.get('digsby.twitter.achievements.link', 'digsby_twitter', fallback)
def DEMO_VIDEO_LINK():
    """Branded URL for the Twitter demo video, with a bit.ly fallback."""
    fallback = 'http://bit.ly/clMDW5'
    return branding.get('digsby.twitter.demovideo.link', 'digsby_twitter', fallback)
def new_reply_text(screen_name, text):
    """Build the prefill text for replying to screen_name.

    Returns (text, cursor_position): the text starts with "@screen_name "
    followed by any unique hashtags found in the original tweet; the cursor
    is positioned just after the @mention.
    """
    import twitter_util as tutil

    prefix = '@' + screen_name + ' '
    cursor_position = len(prefix)

    unique_tags = set(tutil.hashtag.findall(text))
    if unique_tags:
        prefix += ' ' + ' '.join(unique_tags)

    return prefix, cursor_position
def prefill_reply(options):
    '''Returns the text the popup reply button input field is prefilled with.'''
    t = options['tweet']
    return new_reply_text(t.user.screen_name, t.text.decode('xml'))
def prefill_retweet(options):
    '''Returns the text the popup retweet button input field is prefilled with.'''
    t = options['tweet']
    return 'RT @%s %s' % (t.user.screen_name, t.text.decode('xml'))
def prefill_direct(options):
    '''Returns the text the popup direct-message button input field is prefilled with.'''
    t = options['tweet']
    return 'd %s ' % t.user.screen_name
class TwitterAccount(social.network):
    '''Digsby account object for a Twitter account.

    The actual network traffic is driven by a webkit/JS engine owned by
    TwitterProtocol; this class handles account lifecycle (connect /
    disconnect / oauth retry), preference syncing, and infobox updates.
    '''

    service = protocol = 'twitter'
    _dirty = True
    update_mixin_timer = False

    @callbacks.callsback
    def SetStatusMessage(self, message, reply_to=None, callback=None, **k):
        # post a tweet (optionally as a reply); must run on the GUI thread
        @wx.CallAfter
        def after():
            def error(err):
                callback.error(Exception('Error sending tweet'))
            self.twitter_protocol.on_status(message,
                                            reply_id=reply_to,
                                            success=callback.success,
                                            error=error)

    # console shortcuts
    @property
    def j(self):
        return self.connection.webkitcontroller.evaljs

    @property
    def w(self):
        return self.connection.webkitcontroller.webview

    def update_now(self):
        # if we're in "failed to connect" then just try reconnecting.
        if self.state == self.Statuses.OFFLINE and \
                self.offline_reason == self.Reasons.CONN_FAIL:
            self.Connect()
        else:
            self.twitter_protocol.update()

    @classmethod
    def tray_icon_class(cls):
        from .twitter_gui import TwitterTrayIcon
        return TwitterTrayIcon

    def menu_actions(self, menu):
        from .twitter_gui import menu_actions
        menu_actions(self, menu)

    @property
    def connection(self):
        return self.twitter_protocol

    def update_info(self, **info):
        '''new account info arrives from network/account dialog'''
        for item in ['do_follow_digsby', 'do_tweet_about_digsby']:
            info.pop(item, None)
        # if the user changes the password at runtime, then clear the oauth token
        if info.get('password', None) and self.password and info['password'] != self.password:
            log.critical('clearing oauth token')
            info['oauth_token'] = None
        super(TwitterAccount, self).update_info(**info)
        self.set_account_opts()

    def get_options(self):
        '''return the set of values to be serialized to the server'''
        opts = super(TwitterAccount, self).get_options()
        opts.update({'informed_ach': True, 'post_ach_all': False})
        # only persist options that differ from their defaults
        for k in self.protocol_info()['defaults'].iterkeys():
            v = getattr(self, k)
            if v != self.default(k):
                opts[k] = v
        if self.oauth_token is not None:
            opts['oauth_token'] = self.oauth_token
        api_server = getattr(self, 'api_server', None)
        if api_server is not None:
            opts['api_server'] = api_server
        return opts

    def account_prefs(self):
        # (account option name, digsby pref key, default)
        return [('autoscroll_when_at_bottom', 'twitter.autoscroll.when_at_bottom', True), ]

    def set_account_opts(self):
        # push current options down into the JS layer
        if self.twitter_protocol is not None:
            opts = self.get_account_opts()
            self.twitter_protocol.set_options(opts)

    def on_pref_change(self, *a, **k):
        # debounce pref changes; only push options after 500ms of quiet
        @wx.CallAfter
        def after():
            try:
                timer = self._preftimer
            except AttributeError:
                timer = self._preftimer = wx.PyTimer(self.set_account_opts)
            timer.StartOneShot(500)

    @property
    def update_frequencies(self):
        return dict((a, getattr(self, a)) for a in
                    ('friends_timeline',
                     'direct_messages',
                     'replies',
                     'search_updatefreq'))

    def __init__(self, **options):
        # apply protocol defaults, overridden by any passed-in options
        for key in self.protocol_info()['defaults'].iterkeys():
            try: val = options[key]
            except KeyError: val = self.default(key)
            setattr(self, key, val)

        self.oauth_token = options.pop('oauth_token', None)
        self._on_online = Delegate()
        self.count = None
        self.twitter_protocol = None

        if False:
            # TODO: this will take an hour to come back from idle
            @guithread
            def later():
                self.idle_timer = wx.PyTimer(self.on_idle_timer)
                MINUTE_MS = 60 * 1000 * 60
                self.idle_timer.StartRepeating(30 * MINUTE_MS)

        social.network.__init__(self, **options)

        self.header_funcs = [
            (_('Home'), 'http://twitter.com'),
            (_('Profile'), lambda: wx.LaunchDefaultBrowser('http://twitter.com/' + self.twitter_username)),
            (_('Followers'), 'http://twitter.com/followers'),
            (_('Following'), 'http://twitter.com/following'),
        ]

        import twitter_notifications as twitter_notifications
        twitter_notifications._register_hooks()

        # options that affect first creation of the account.
        # account dialog will call onCreate
        self.do_follow_digsby = options.pop('do_follow_digsby', False)
        # self.do_tweet_about_digsby = options.pop('do_tweet_about_digsby', False)

        for twitter_pref, digsby_pref, default in self.account_prefs():
            common.profile.prefs.link(digsby_pref, self.on_pref_change)

        #self.extra_header_func = (_('Invite Friends'), self.on_invite_friends)
        self.api_server = options.get('api_server', None)

    @property
    def twitter_username(self):
        # reads from the JS layer, so only valid on the GUI thread
        assert wx.IsMainThread()
        return self.j('account.selfScreenNameLower')

    def on_invite_friends(self):
        '''show a dialog asking to direct message followers, inviting them to digsby'''
        from .twitter_gui import show_acheivements_dialog
        show_acheivements_dialog(lambda: self.j('inviteFollowers();'))

    def on_idle_timer(self):
        # tell the JS layer whether the user is idle
        if self.twitter_protocol is not None:
            import gui.native.helpers
            from common import pref
            idle = gui.native.helpers.GetUserIdleTime() > pref('twitter.idle_time', type=int, default=(10 * 60 * 1000))
            val = 'true' if idle else 'false'
            self.j('window.userIdle = %s;' % val)

    def onCreate(self):
        '''called just after this account type is created by the user'''
        if self.do_follow_digsby:
            self._on_online += lambda: self.twitter_protocol.webkitcontroller.JSCall('follow', screen_name='digsby')
        # if self.do_tweet_about_digsby:
        # self._on_online += lambda: self.twitter_protocol.on_status(DIGSBY_TWEET_MESSAGE())
        if SHOW_INVITE_DM_DIALOG_ON_CREATE:
            wx.CallAfter(self.on_invite_friends)

    def get_account_opts(self):
        # full option dict handed to the JS layer on connect
        opts = dict((a, getattr(self, a))
                    for a in self.protocol_info()['defaults'])
        for twitter_pref, digsby_pref, default in self.account_prefs():
            opts[twitter_pref] = common.pref(digsby_pref, default)
        opts['demovideo_link'] = DEMO_VIDEO_LINK()
        api_server = getattr(self, 'api_server', None)
        log.warning('api_server: %r', api_server)
        if api_server is not None:
            opts['apiRoot'] = api_server
        return opts

    def on_state_change(self, state):
        # state strings come from the JS layer
        log.info('on_state_change: %r', state)
        if state == 'online':
            self.change_state(self.Statuses.ONLINE)
            self._on_online.call_and_clear()
        elif state == 'autherror':
            self.set_offline(self.Reasons.BAD_PASSWORD)
        elif state == 'oautherror':
            # try renegotiating the OAuth token once before giving up
            if self._should_retry_oauth():
                log.warning('negotiating new OAuth token')
                metrics.event('Twitter OAuth Refresh Token')
                self.Disconnect(set_state=False)
                self.oauth_token = None
                self.Connect()
            else:
                self.set_offline(self.Reasons.BAD_PASSWORD)
        elif state == 'connfail':
            self.set_offline(self.Reasons.CONN_FAIL)
            self.Disconnect(set_state=False)

    _last_oauth_retry_time = 0

    def _should_retry_oauth(self):
        # rate-limit oauth retries to once every two minutes
        now = time()
        if now - self._last_oauth_retry_time > 60*2:
            self._last_oauth_retry_time = now
            return True

    def Connect(self):
        @guithread
        def _connect():
            log.warning('twitter Connect')
            self.change_state(self.Statuses.CONNECTING)
            if self.twitter_protocol is not None:
                self.twitter_protocol.disconnect()
            self.twitter_protocol = TwitterProtocol(self.username, self._decryptedpw())
            self.json = self.twitter_protocol.json
            self.twitter_protocol.account = self
            self.twitter_protocol.connect(self.get_account_opts())
            # wire protocol events to account handlers
            e = self.twitter_protocol.events
            e.state_changed += self.on_state_change
            e.on_unread_counts += self.on_unread_counts
            e.on_feeds += self.on_feeds
            e.recent_timeline += self.invalidate_infobox
            e.self_tweet += self.invalidate_infobox
            e.status_update_clicked += self.update_status_window_needed
            e.on_corrupted_database += self.on_corrupted_database
            e.update_social_ids += self.on_update_social_ids
            e.received_whole_update += self.on_received_whole_update

    def _get_database_path(self):
        return self.connection._get_database_path()

    def on_corrupted_database(self):
        '''
        The webkit control window signals this method via "D.rpc('on_corrupted_database');"
        when sqlite indicates that the database is corrupted, or if the openDatabase call
        returns undefined.

        We try to remove the database file entirely, and then reconnect.
        '''
        if getattr(self, 'did_attempt_recovery', False):
            log.info('skipping on_corrupted_database, already done once')
            return

        log.info('corrupted_database detected')
        log.info('free disk space: %r', fileutil.free_disk_space())
        if self.connection:
            dbpath = self._get_database_path()
            log.info(' path to database: %r', dbpath)
            if dbpath:
                result = try_opening_tempfile(os.path.dirname(dbpath))
                log.info('opening tempfile: %r', result)

            self.Disconnect()

            def disconnected():
                try:
                    log.info(' attempting delete')
                    os.remove(dbpath)
                except Exception:
                    traceback.print_exc()
                else:
                    log.info('success! reconnecting')
                    self.Connect()

            # only attempt this recovery once per session
            self.did_attempt_recovery = True
            wx.CallLater(1000, disconnected)

    def on_feeds(self, feeds):
        self.invalidate_infobox()

    def observe_count(self, callback):
        self.add_gui_observer(callback, 'count')

    def unobserve_count(self, callback):
        self.remove_gui_observer(callback, 'count')

    def on_unread_counts(self, opts):
        self.setnotify('count', opts.get('total'))
        self.invalidate_infobox()

    def invalidate_infobox(self, *a, **k):
        self.on_update_social_ids()
        self.set_infobox_dirty()

    def on_received_whole_update(self):
        self.did_receive_whole_update = True

    def on_update_social_ids(self):
        if self.state == self.Statuses.ONLINE and getattr(self, 'did_receive_whole_update', False):
            self.twitter_protocol.update_social_ids()

    def set_infobox_dirty(self):
        self._dirty = True
        self.notify('dirty')

    def disconnect(self):
        self.Disconnect()

    def Disconnect(self, *a, **k):
        log.warning('twitter Disconnect')
        if self.twitter_protocol is not None:
            @guithread
            def after():
                # unhook all event handlers, then tear down the protocol
                p, self.twitter_protocol = self.twitter_protocol, None
                e = p.events
                e.state_changed -= self.on_state_change
                e.on_unread_counts -= self.on_unread_counts
                e.recent_timeline -= self.invalidate_infobox
                e.self_tweet -= self.invalidate_infobox
                e.on_feeds -= self.on_feeds
                e.status_update_clicked -= self.update_status_window_needed
                e.on_corrupted_database -= self.on_corrupted_database
                e.update_social_ids -= self.on_update_social_ids
                e.received_whole_update -= self.on_received_whole_update
                p.disconnect()

        @guithread
        def after2():
            set_state = k.pop('set_state', True)
            if set_state:
                self.set_offline(self.Reasons.NONE)
            self.did_receive_whole_update = False
            success = k.pop('success', None)
            if success is not None:
                success()

    def DefaultAction(self):
        if self.twitter_protocol is not None and self.state == self.Statuses.ONLINE:
            self.twitter_protocol.open_timeline_window()

    def update_status_window_needed(self):
        if common.pref('social.use_global_status', default=False, type=bool):
            wx.GetApp().SetStatusPrompt([self])
        else:
            self.twitter_protocol.open_timeline_window()

    def _enable_unread_counts(self):
        self.connection.set_account_pref('show_unread_count', True)
        self.on_unread_counts({'total':self.count})

    def _disable_unread_counts(self):
        self.connection.set_account_pref('show_unread_count', False)
        self.on_unread_counts({'total':self.count})

    def should_show_unread_counts(self):
        return _get_account_pref(self.username, 'show_unread_count', True)

    def count_text_callback(self, txt):
        # append " (N)" to txt when unread counts are shown
        if self.should_show_unread_counts() and self.count is not None:
            return txt + (' (%s)' % self.count)
        else:
            return txt

    def mark_all_as_read(self):
        self.connection.mark_all_as_read()
class TwitterProtocol(object):
event_names = '''
state_changed
following
reply
trends
on_unread_counts
on_feeds
on_edit_feed
on_view
on_change_view
status_update_clicked
recent_timeline
self_tweet
on_corrupted_database
update_social_ids
received_whole_update
'''.split()
def __init__(self, username, password):
self.username = username
self.password = password
self.recent_timeline = []
self.self_tweet = None
self.trends = {}
self.feeds = []
self.feeds_by_name = {}
self.unread_counts = []
e = self.events = Storage((name, Delegate()) for name in self.event_names)
e.following += self.on_following
e.trends += self.on_trends
e.on_unread_counts += self.on_unread_counts
e.recent_timeline += self.on_recent_timeline
e.self_tweet += self.on_self_tweet
e.on_feeds += self.on_feeds
e.on_change_view += self.on_change_view
e.on_view += self.on_view_changed
def render_tweets(tweets, render_context):
return htmlize_tweets(self, tweets)
self.social_feed = SocialFeed('twitter_' + self.username,
'twitter_' + self.username,
self.get_tweet_feed,
render_tweets,
lambda: self.account.set_infobox_dirty)
def _get_database_path(self):
webview = self.webkitcontroller.webview
return webview.GetDatabasePath('digsbysocial_' + self.username)
def set_options(self, options):
guithread(lambda: self.webkitcontroller.JSCall('setAccountOptions', **options))
def on_change_view(self, feed_name):
log.info('on_change_view %r', feed_name)
window = self.webkitcontroller.FeedWindow
if window is not None:
log.info(' found a window, calling switch_to_view')
window.switch_to_view(feed_name)
tlw = window.Top
if tlw.IsIconized(): tlw.Iconize(False)
window.Top.Raise()
else:
log.info(' no window found, calling open_timeline_window')
self.webkitcontroller.open_timeline_window(feed_name)
def on_view_changed(self, feed_name):
feed = self.feeds_by_name.get(feed_name, None)
if feed is not None and feed.get('query', None) is not None and feed.get('save', False):
hooks.notify('digsby.statistics.twitter.viewed_search')
def on_feeds(self, feeds):
self.feeds = feeds
self.feeds_by_name = dict((f['name'], f) for f in feeds)
self.feeds_by_name.update(favorites=dict(name='favorites', label=_('Favorites')),
history=dict(name='history', label=_('History')))
import twitter_notifications as tnots
tnots._update_notifications(self, feeds)
self._save_feeds(feeds)
def _save_feeds(self, feeds):
# don't include non-saved searches
def should_save(f):
return f['type'] not in ('search', 'user') or f.get('save', False)
feeds_pref = filter(should_save, deepcopy(feeds))
# don't serialize certain attributes out to prefs
for feed in feeds_pref:
for attr in ('count', 'label'):
feed.pop(attr)
self.set_account_pref('feeds', feeds_pref)
@property
def account_prefix(self):
return 'twitter.' + self.username
def account_pref_key(self, name):
return _account_pref_key(self.username, name)
def set_account_pref(self, name, value):
from common import setpref
value = simplejson.dumps(value)
setpref(self.account_pref_key(name), value)
def get_account_pref(self, name, default):
return _get_account_pref(self.username, name, default)
def on_unread_counts(self, opts):
self.unread_counts = opts.get('feeds')
self.unread_total = opts.get('total')
def on_recent_timeline(self, tweets):
self.recent_timeline = [to_storage(t) for t in tweets]
self.recent_timeline.reverse()
self.events.update_social_ids()
def update_social_ids(self):
try:
t = self._socialtimer
except AttributeError:
def later():
ids = [p['id'] for p in self.recent_timeline]
self.social_feed.new_ids(ids)
t = self._socialtimer = wx.PyTimer(later)
if not t.IsRunning():
t.StartOneShot(1000)
def on_self_tweet(self, tweet):
self.self_tweet = to_storage(tweet)
def on_following(self, ids):
# TODO: stop should actually do something
if hasattr(self, 'stream'):
self.stream.stop()
if common.pref('twitter.streaming', default=False):
from twitterstream import TwitterStream
self.stream = TwitterStream(self.username, self.password, ids)
self.stream.on_tweet += self.on_stream_tweet
self.stream.start()
def on_trends(self, trends):
# TODO: store trends over time?
#self.trends.update(trends['trends'])
trends = trends['trends']
self.trends = trends[trends.keys()[0]]
def on_stream_tweet(self, tweet):
if self.webkitcontroller is not None:
wx.CallAfter(self.webkitcontroller.realtime_tweet, tweet)
def connect(self, accountopts):
@guithread
def later():
self.webkitcontroller = TwitterWebKitController(self)
self.webkitcontroller.initialize(self.username,
self.password,
self.get_user_feeds(),
accountopts)
self.init_webkit_methods()
def _verify_databases(self):
# webkit doesn't release file object locks for corrupted databases,
# so check the integrity of the databases we care about here first.
# upon any errors, they are deleted.
import sqlite3
def try_query_remove_on_error(dbpath, query):
'''try a query on database dbpath. dbpath is deleted on any
exception.'''
dbpath = path(dbpath)
log.info('verifying db %r', dbpath)
if not dbpath.isfile():
log.info('not a file')
return
try:
conn = sqlite3.connect(dbpath)
with conn:
conn.execute(query)
conn.close()
except Exception:
traceback.print_exc()
with traceguard:
log.warning('exception encountered, removing %r', dbpath)
dbpath.remove()
log.warning('remove completed')
# check the integrity of the "index" database that webkit uses to track
# each site's database
try_query_remove_on_error(
path(self.webkitcontroller.webview.GetDatabaseDirectory()) / 'Databases.db',
'select * from Databases limit 1')
# calling window.openDatabase is necessary once for the below
# _get_database_path() call to work.
self.webkitcontroller.webview.RunScript(
'''var test_db = window.openDatabase('_test_db_', "1.0", "test db", 1024);''')
# ensure the twitter database is okay.
try_query_remove_on_error(
self._get_database_path(),
'create table if not exists _test (foo int)')
def get_user_feeds(self):
def deffeed(n):
return dict(name=n, type=n)
default_feeds = [deffeed(n) for n in
('timeline', 'mentions', 'directs')]
userfeeds = self.get_account_pref('feeds', default_feeds)
def revert():
log.warning('REVERTING user feeds, was %r:', userfeeds)
self.set_account_pref('feeds', default_feeds)
return default_feeds
from pprint import pprint; pprint(userfeeds)
if not isinstance(userfeeds, list):
return revert()
try:
if userfeeds is not default_feeds:
for feed in default_feeds:
for ufeed in userfeeds:
if feed['type'] == ufeed['type']:
break
else:
return revert()
except Exception:
traceback.print_exc()
return revert()
return userfeeds
def init_webkit_methods(self):
# forward some methods to webkitcontroller
for method_name in '''
open_timeline_window
clear_cache
update
on_status
on_status_with_error_popup
add_feed
edit_feed
delete_feed
set_feeds
add_group
get_users
get_prefs'''.split():
setattr(self, method_name, getattr(self.webkitcontroller, method_name))
def json(self, *a, **k):
self.webkitcontroller.json(*a, **k)
def disconnect(self):
self.webkitcontroller.disconnect()
def mark_all_as_read(self):
self.webkitcontroller.evaljs('markAllAsRead();')
def on_reply(self, id, screen_name, text):
from .twitter_gui import TwitterFrame
TwitterFrame.Reply(id, screen_name, text)
def on_retweet(self, id, screen_name, text):
    '''Open the retweet window prefilled for the given tweet.'''
    from . import twitter_gui
    twitter_gui.TwitterFrame.Retweet(id, screen_name, text)
def on_direct(self, screen_name):
    '''Open the direct-message window for the given user.'''
    from . import twitter_gui
    twitter_gui.TwitterFrame.Direct(screen_name)
def mark_feed_as_read(self, feed_name):
    '''Tell the JS app to mark every tweet in feed_name as read.'''
    controller = self.webkitcontroller
    controller.JSCall('markFeedAsRead', feedName=feed_name)
def toggle_addstocount(self, feed_name):
    '''Toggle whether the named feed contributes to the unread count.'''
    controller = self.webkitcontroller
    controller.JSCall('toggleAddsToCount', feedName=feed_name)
def get_ids_and_context(self, _feed_context):
    '''Return (ids of tweets currently in the feed, the recent timeline).'''
    #_feed_context ?= tab
    ids = [t['id'] for t in self.get_tweet_feed()]
    return ids, self.recent_timeline
def get_tweet_feed(self):
    '''Yield recent tweets, skipping the user's own "self tweet" if present.'''
    skip_id = None if self.self_tweet is None else self.self_tweet['id']
    for tweet in self.recent_timeline:
        if skip_id is None or tweet['id'] != skip_id:
            yield tweet
class TwitterWebKitController(object):
    '''Owns the hidden WebKit view that runs the twitter Javascript app and
    bridges JSON-RPC calls between it and the protocol object.

    Incoming JS calls arrive via on_call(); outgoing calls go through
    JSCall()/evaljs(), which must run on the GUI thread.
    '''

    def __init__(self, protocol):
        from .twitter_gui import TwitterWebView
        # the webview lives in an invisible frame; the JS app runs headless
        self.hidden_frame = wx.Frame(None)
        self.protocol = protocol
        w = self.webview = TwitterWebView(self.hidden_frame, protocol)
        w._setup_logging(log)
        from rpc.jsonrpc import JSPythonBridge
        self.bridge = JSPythonBridge(w)
        self.bridge.on_call += self.on_call
        w.Bind(wx.webview.EVT_WEBVIEW_LOAD, self.on_load)
        # callback invoked once the page finishes loading (see on_load)
        self.when_load = None

        # button callbacks for popups
        self.tweet_buttons = [(_('Reply'), input_callback(self.on_popup_reply, prefill_reply)),
                              (_('Retweet'), input_callback(self.on_popup_retweet, prefill_retweet))]
        self.direct_buttons = [(_('Direct'), input_callback(self.on_popup_direct, prefill_direct))]

    @property
    def FeedWindow(self):
        # The webview of the open TwitterFrame belonging to this controller's
        # hidden frame, or None when no timeline window is open.
        from .twitter_gui import TwitterFrame
        for win in wx.GetTopLevelWindows():
            if isinstance(win, TwitterFrame):
                if win.Parent.Top is self.hidden_frame:
                    return win.panel.webview

    def JSCall(self, method, **opts):
        # Invoke a JS-side method through the bridge; GUI thread only.
        if not wx.IsMainThread():
            raise AssertionError('JSCall called from thread ' + threading.current_thread().name)
        return self.bridge.Call(method, **opts)

    def on_popup_click(self, tweet):
        # Clicking a popup opens either the single link in the tweet or the
        # tweet's page on twitter.com.
        from common import pref
        from util import net
        url = None
        if pref('twitter.scan_urls', type=bool, default=True):
            links = net.find_links(tweet.text)
            if links and len(links) == 1:
                url = links[0]
        if url is None:
            if tweet.tweet_type == 'direct':
                url = 'http://twitter.com/#inbox'
            else:
                url = 'http://twitter.com/%s/statuses/%s' % (tweet.user.screen_name, tweet.id)
        wx.LaunchDefaultBrowser(url)

    def on_popup_reply(self, text, options):
        self.protocol.on_status_with_error_popup(text, options['tweet'].id)

    def on_popup_retweet(self, text, options):
        self.protocol.on_status_with_error_popup(text)

    def on_popup_direct(self, text, options):
        # Ensure the outgoing message is formatted as a direct: "d username ..."
        tweet = options['tweet']
        prefix = 'd ' + tweet.user.screen_name + ' '
        if not text.startswith(prefix):
            text = prefix + text
        self.protocol.on_status_with_error_popup(text)

    # --- on_call_* handlers are dispatched by name from on_call() ---

    def on_call_corrupted_database(self, params):
        log.info('on_call_corrupted_database %r', params)
        self.protocol.events.on_corrupted_database()

    def on_call_edit_feed(self, feed):
        self.protocol.events.on_edit_feed(feed)

    def on_call_feeds(self, feeds):
        self.protocol.events.on_feeds(feeds)

    def on_call_unread(self, feeds):
        self.protocol.events.on_unread_counts(feeds)

    def on_call_change_view(self, feed_name):
        self.protocol.events.on_change_view(feed_name)

    def on_call_send_tweet(self, param, id_):
        # stringify keys so they can be used as keyword arguments
        param = dict((str(k), v) for k, v in param.items())
        self.on_status_with_error_popup(**param)

    def on_call_favorite_tweet(self, param, id_, webview):
        param = dict((str(k), v) for k, v in param.items())
        dumps = simplejson.dumps
        # report the result back into the JS context that asked
        def run_success(result, **k):
            webview.RunScript('''Digsby.successIn(%s, %s);''' % (dumps(result), dumps(id_)))
        def run_error(error, **k):
            webview.RunScript('''Digsby.errorIn(%s, %s);''' % (dumps(error), dumps(id_)))
        self.JSCall('favorite', success=run_success, error=run_error, **param)

    def on_call_delete_tweet(self, param, id_, webview):
        param = dict((str(k), v) for k, v in param.items())
        dumps = simplejson.dumps
        def run_success(result, **k):
            webview.RunScript('''Digsby.successIn(%s, %s);''' % (dumps(result), dumps(id_)))
        def run_error(error, **k):
            webview.RunScript('''Digsby.errorIn(%s, %s);''' % (dumps(error), dumps(id_)))
        self.JSCall('deleteTweet', success=run_success, error=run_error, **param)

    def on_call_get_idle_time(self, params, id_, webview):
        from gui.native.helpers import GetUserIdleTime
        t = GetUserIdleTime()
        wx.CallAfter(Dsuccess, id_, self.webview, idleTime=t)

    def json(self, rpc, webview):
        # Javascript calls to D from the infobox get sent here
        self.on_call(rpc, webview)

    def on_call(self, json_obj, webview=None):
        # Dispatch an incoming JSON-RPC message: first to an on_call_* method
        # if one exists, otherwise through the named-method chain below.
        params = json_obj.get('params')
        method = json_obj.get('method')
        id_ = json_obj.get('id')
        events = self.protocol.events
        try:
            call = getattr(self, 'on_call_' + method)
            call.__call__  # ensure the attribute is callable
        except AttributeError:
            pass
        else:
            # invoke with as many arguments as the handler's signature accepts
            if call.func_code.co_argcount < 3:
                return call(params[0])
            elif call.func_code.co_argcount < 4:
                return call(params[0], id_)
            else:
                return call(params[0], id_, webview)
        if method == 'viewChanged':
            feedName = params[0].get('feedName')
            events.on_view(feedName)
        elif method == 'following':
            following = params[0].get('following')
            events.following(following)
        elif method == 'state':
            state = params[0].get('state')
            if state is not None:
                events.state_changed(state)
        elif method == 'received_whole_update':
            events.received_whole_update()
        elif method == 'trends':
            trends = params[0].get('trends', None)
            if trends is not None:
                events.trends(trends)
        elif method == 'recentTimeline':
            tweets = params[0].get('tweets')
            events.recent_timeline(tweets)
        elif method == 'selfTweet':
            tweet = params[0].get('tweet')
            events.self_tweet(tweet)
        elif params:
            # fallback: digsby:// urls clicked inside the webview
            param = params[0]
            if param is not None and isinstance(param, dict):
                url = param.get('url')
                if url and url.startswith('digsby:'):
                    url = UrlQuery.parse('http' + url[6:], utf8=True) # UrlQuery doesn't like digsby://
                    q = url['query'].get
                    netloc = url['netloc']
                    if netloc == 'reply':
                        id, screen_name, text = q('id'), q('screen_name'), q('text')
                        if id and screen_name:
                            self.protocol.on_reply(id, screen_name, text)
                    elif netloc == 'retweet':
                        id, screen_name, text = q('id'), q('screen_name'), q('text')
                        if id and screen_name:
                            self.protocol.on_retweet(id, screen_name, text)
                    elif netloc == 'direct':
                        screen_name = q('screen_name')
                        if screen_name:
                            self.protocol.on_direct(screen_name)

    def on_call_next_item(self, params, id_, webview):
        return self.protocol.social_feed.jscall_next_item(webview, id_)

    def on_call_initialize_feed(self, params, id_, webview):
        return self.protocol.social_feed.jscall_initialize_feed(webview, id_)

    def on_call_status_update_clicked(self, *a, **k):
        self.protocol.events.status_update_clicked()

    def on_call_hook(self, hook_name):
        '''Allows Javascript to call Hooks.'''
        hooks.notify(hook_name)

    def on_call_fire(self, opts, id=None, buttons=None, onclick=None):
        # Shows popup notifications for incoming tweets.
        from common import fire, pref
        from gui import skin

        # stringify keys, so that they can be keywords.
        # also turn dicts into storages
        opts = to_storage(dict((str(k), v)
                          for k, v in opts.iteritems()))

        if pref('twitter.popups.user_icons', default=True):
            from gui.browser.webkit.imageloader import LazyWebKitImage
            twitter_icon = skin.get('serviceicons.twitter', None)
            for tweet in opts.tweets:
                tweet.icon = LazyWebKitImage(tweet.user.profile_image_url, twitter_icon)

        def buttons_cb(item):
            # directs get a Direct button; everything else gets Reply/Retweet
            if hasattr(item.tweet, 'sender_id'):
                return self.direct_buttons
            else:
                return self.tweet_buttons

        opts.update(onclick=onclick or self.on_popup_click,
                    popupid='twitter20_' + self.username + str(opts.get('popupid_postfix', '')),
                    buttons=buttons or buttons_cb,
                    max_lines=10)
        if pref('twitter.popups.mark_as_read', default=True):
            opts.update(mark_as_read=self.mark_as_read)
        opts.update(badge=skin.get('serviceicons.twitter', None))
        fire(**opts)

    def mark_as_read(self, item):
        self.JSCall('markAsRead', tweet_id=item.tweet['id'])

    def initialize(self, username, password, userfeeds=None, accountopts=None):
        '''Loads the JS app; once it has loaded (see when_load below), obtains
        an OAuth token (from the account or the network) and calls the JS-side
        "initialize" entry point.'''
        self.username = username
        self.password = password
        userfeeds = [] if userfeeds is None else userfeeds

        def when_load():
            # runs after the page load event fires (see on_load)
            self.protocol._verify_databases()
            self.evaljs('window.resdir = %s' % simplejson.dumps((path(__file__).parent / 'res').url()))

            def success(token):
                # push credentials (and the OAuth token, if any) into the JS app
                opts = dict(username=self.username,
                            password=self.password,
                            feeds=userfeeds,
                            accountopts=accountopts or {})
                if token is not None:
                    assert hasattr(token, 'key'), repr(token)
                    opts.update(oauthTokenKey = token.key,
                                oauthTokenSecret = token.secret,
                                oauthConsumerKey = twitter_auth.CONSUMER_KEY,
                                oauthConsumerSecret = twitter_auth.CONSUMER_SECRET)
                    time_correction = twitter_auth.get_time_correction()
                    if time_correction is not None:
                        opts['accountopts'].update(timeCorrectionSecs=-time_correction)
                self.JSCall('initialize', **opts)

            # custom api servers skip OAuth entirely
            api_server = getattr(self.protocol.account, 'api_server', None)
            if api_server is not None:
                return success(None)

            # prefer the token already stored on the account
            if self.oauth_token is not None:
                try:
                    token = OAuthToken.from_string(self.oauth_token)
                except Exception:
                    traceback.print_exc()
                else:
                    log.info('using token stored in account')
                    return success(token)

            def on_token(token):
                token_string = token.to_string()
                log.info('on_token received token from network: %r', token_string[:5])
                self.protocol.account.update_info(oauth_token=token_string)
                success(token)

            def on_token_error(e):
                errcode = getattr(e, 'code', None)

                # if obtaining an token fails, it may be because our time is set incorrectly.
                # we can use the Date: header returned by Twitter's servers to adjust for
                # this.
                if errcode == 401:
                    server_date = getattr(e, 'hdrs', {}).get('Date', None)
                    retries_after_401 = getattr(self.protocol, 'retries_after_401', 0)
                    if server_date and retries_after_401 < 1:
                        # retry once with a corrected clock
                        self.protocol.retries_after_401 = retries_after_401 + 1
                        log.warning('on_token_error: server date is %r', server_date)
                        server_date = parse_http_date(server_date)
                        log.warning('on_token_Error: RETRYING WITH NEW SERVER DATE %r', server_date)
                        twitter_auth.set_server_timestamp(server_date)
                        return twitter_auth.get_oauth_token(self.username, self.password, success=on_token, error=on_token_error)

                state = 'autherror' if errcode == 401 else 'connfail'
                log.error('on_token_error: e.code is %r', errcode)
                log.error(' changing state to %r', state)
                self.protocol.events.state_changed(state)

            log.info('getting new oauth token from network')
            twitter_auth.get_oauth_token(self.username, self.password, success=on_token, error=on_token_error)

        self.when_load = when_load
        url = APP_PATH.url()
        # the JS app needs cross-origin access to twitter.com
        from gui.browser import webkit
        webkit.update_origin_whitelist(url, 'https', 'twitter.com', True)
        webkit.update_origin_whitelist(url, 'http', 'twitter.com', True)
        api_server = getattr(self.protocol.account, 'api_server', None)
        if api_server is not None:
            api = UrlQuery.parse(api_server)
            webkit.update_origin_whitelist(url, api['scheme'], api['netloc'], True)
        self.bridge.LoadURL(url)

    def set_oauth_token(self, token):
        self.protocol.account.oauth_token = token

    def get_oauth_token(self):
        return self.protocol.account.oauth_token

    # the OAuth token is persisted on the account object
    oauth_token = property(get_oauth_token, set_oauth_token)

    def disconnect(self):
        # Destroy the hidden frame (and the webview with it) on the GUI thread.
        @guithread
        def _disconnect():
            if not wx.IsDestroyed(self.hidden_frame):
                self.hidden_frame.Destroy()

    def open_timeline_window(self, feed_name=None):
        # Raise an existing timeline window, or ask the JS app to open one.
        from .twitter_gui import TwitterFrame
        frame = TwitterFrame.ForProtocol(self.protocol)
        if frame is not None:
            frame.Iconize(False)
            frame.Raise()
        else:
            if feed_name is not None:
                js = 'openWindow(%s);' % simplejson.dumps(feed_name)
            else:
                js = 'openWindow();'
            self.evaljs(js)

    def update(self):
        self.evaljs('update();');

    def clear_cache(self):
        self.evaljs('dropAllTables();');

    def get_users(self, success=None, error=None):
        self.JSCall('getUsers', success=success, error=error)

    def get_prefs(self, success=None):
        self.JSCall('getPrefs', success=success)

    def on_status_with_error_popup(self, status, reply_id=None, success=None, error=None):
        '''sends a direct or tweet. on error, fire('error') is called (and
        optionally your own error callback.'''
        def _error(e):
            from common import fire
            import gui.clipboard

            if isinstance(e, basestring):
                error_message = u''.join([e, u'\n\n', u'"', status, u'"'])
            else:
                error_message = status

            fire('error',
                 title=_('Twitter Error'),
                 major=_('Send Tweet Failed'),
                 minor=error_message,
                 buttons=[(_('Retry'), lambda: self.on_status_with_error_popup(status)),
                          (_('Copy'), lambda: gui.clipboard.copy(status)),
                          (_('Close'), lambda * a, **k: None)],
                 sticky=True)

            if error is not None:
                error(e)

        return self.on_status(status, reply_id, success, _error)

    def on_status(self, status, reply_id=None, success=None, error=None):
        return self.JSCall('tweet',
                           status=status,
                           replyTo=reply_id,
                           success=success,
                           error=error)

    def evaljs(self, js):
        # Run raw Javascript in the webview; GUI thread only.
        if not wx.IsMainThread():
            raise AssertionError('evaljs called from thread ' + threading.current_thread().name)
        return self.webview.RunScript(js)

    def realtime_tweet(self, tweet_json):
        # tweet_json arrives as a raw JSON string; inject it into the JS app
        script = 'onTweet(' + tweet_json.strip() + ');'
        self.evaljs(script)

    def on_load(self, e):
        e.Skip()
        if e.GetState() == wx.webview.WEBVIEW_LOAD_ONLOAD_HANDLED:
            if self.when_load is not None:
                # run the pending load callback exactly once
                when_load, self.when_load = self.when_load, None
                when_load()

    def add_feed(self, feed_info):
        self.JSCall('addFeed', feed=feed_info)

    def edit_feed(self, feed_info):
        self.JSCall('editFeed', feed=feed_info)

    def delete_feed(self, feed_name):
        self.JSCall('deleteFeed', feedName=feed_name)

    def set_feeds(self, feeds):
        self.JSCall('setFeeds', feeds=feeds)

    def add_group(self, group_info):
        self.JSCall('addGroup', group=group_info)
class TwitterIB(gui_providers.InfoboxProviderBase):
    '''Infobox HTML provider for a TwitterAccount (registered as an adapter
    via protocols.advise).'''
    protocols.advise(asAdapterForTypes=[TwitterAccount], instancesProvide=[gui_interfaces.ICacheableInfoboxHTMLProvider])

    def __init__(self, acct):
        gui_providers.InfoboxProviderBase.__init__(self)
        self.acct = acct

    def get_html(self, *a, **k):
        # clear the account's dirty flag unless the caller opts out
        if k.pop('set_dirty', True):
            self.acct._dirty = False
        return gui_providers.InfoboxProviderBase.get_html(self, *a, **k)

    def get_app_context(self, ctxt_class):
        return ctxt_class(path(__file__).parent.parent, self.acct.protocol)

    def get_context(self):
        # Build the template context used by the infobox tenjin templates.
        ctxt = gui_providers.InfoboxProviderBase.get_context(self)
        conn = self.acct.twitter_protocol
        import twitter_util as tutil
        from path import path
        resdir = path(__file__).dirname() / 'res'
        ctxt.update(acct=self.acct,
                    conn=conn,
                    trends=conn.trends,
                    tweets=[],
                    counts=conn.unread_counts,
                    self_tweet=conn.self_tweet,
                    res=lambda p: (resdir / p).url(),
                    twitter_linkify=tutil.twitter_linkify,
                    format_tweet_date=tutil.format_tweet_date)
        return ctxt

    @property
    def _dirty(self):
        # TODO: no
        return True
def title_from_query(query):
    '''
    attempts to return a "title" for a search query

    >>> title_from_query('"Happy Labor Day" OR "Labour Day"')
    'Happy Labor Day'

    Empty or whitespace-only queries return the stripped query instead of
    raising (the original code crashed with IndexError on ''.split()[0]).
    '''
    def dequote(s):
        # strip one pair of surrounding quotes when they are the only quotes;
        # returns None (implicitly) when s is not quoted
        if s.count('"') == 2 and s.startswith('"') and s.endswith('"'):
            return s[1:-1]

    title = dequote(query)
    if title is None:
        # fall back to the first OR/AND clause of the query
        title = query.split(' OR ')[0].split(' AND ')[0]
        words = title.split()
        if not words:
            # nothing to extract from an empty/whitespace query
            return query.strip()
        title = dequote(title) or words[0].strip(string.punctuation)
    return title
def guithread(func):
    '''Calls func now if we're on the GUI thread; else calls it later on the
    GUI thread.'''
    if not wx.IsMainThread():
        wx.CallAfter(func)
        return
    with traceguard:
        func()
class input_callback(object):
    '''Passed to fire() as handlers for "buttons" callbacks. Causes popups to
    show input fields after pressing those buttons.'''
    # TODO: document this interface and place an abstract class in toast.py

    close_button = True
    spellcheck = True

    def __init__(self, cb, value_cb):
        self.input_cb, self.get_value = cb, value_cb
        self.char_limit = 140

    def spellcheck_regexes(self):
        # patterns the spellchecker should ignore (e.g. @names, urls)
        import twitter_util
        return twitter_util.spellcheck_regex_ignores
def _account_pref_key(username, name):
return '.'.join(['twitter.prefs', username, name])
def _get_account_pref(username, name, default):
    '''Read a per-account pref, JSON-decoding it when stored as a string.'''
    from common import pref
    value = pref(_account_pref_key(username, name), default=default)
    if isinstance(value, basestring):
        value = simplejson.loads(value)
    return value
def get_users(callback, accts=None):
    '''Collect the merged user dict from each connected Twitter account and
    invoke callback once every account has answered.'''
    if accts is None:
        from common import profile
        accts = [a for a in profile.socialaccounts
                 if a.connection is not None
                 and isinstance(a, TwitterAccount)]

    total = len(accts)
    pending = dict(count=0)
    merged = {}

    def one_result(users):
        merged.update(users)
        pending['count'] += 1
        if pending['count'] == total:
            callback(merged)

    for acct in accts:
        acct.connection.get_users(one_result)
def try_opening_tempfile(dirpath):
    '''
    some users see WebKit returning undefined from the openDatabase call. this
    function attempts to open a file in the database directory and write to it-
    to see if they don't have permission.
    '''
    probe = os.path.join(dirpath, 'test.tmp')
    try:
        with open(probe, 'w') as f:
            f.write('test')
        if not os.path.isfile(probe):
            raise Exception('file wasn\'t found after write: %r' % probe)
        try:
            os.remove(probe)
        except Exception:
            pass  # cleanup failure is not a permission failure
    except Exception:
        traceback.print_exc()
        return False
    else:
        log.info('wrote to %r successfully', dirpath)
        return True
def htmlize_tweets(protocol, tweets, self_tweet=None):
    '''Given a protocol and a sequence of tweets, returns infobox HTML for them.'''
    provider = TwitterIB(Storage(twitter_protocol=protocol, protocol='twitter'))
    res_dir = provider.get_context()['app'].get_res_dir('base')
    return provider.get_html(None,
                             set_dirty=False,
                             file='tweets.tenjin',
                             dir=res_dir,
                             context=Storage(tweets=tweets, self_tweet=self_tweet))
def parse_http_date(s):
    '''Parse an RFC 2822 HTTP Date header into a unix timestamp.'''
    from email.utils import mktime_tz, parsedate_tz
    return mktime_tz(parsedate_tz(s))
| {
"pile_set_name": "Github"
} |
package cn.springcloud.gray.server.oauth2;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.security.core.Authentication;
import org.springframework.security.core.context.SecurityContext;
import org.springframework.security.core.context.SecurityContextHolder;
import org.springframework.security.core.userdetails.UserDetails;
import org.springframework.security.oauth2.common.OAuth2AccessToken;
import org.springframework.security.oauth2.provider.*;
import org.springframework.security.oauth2.provider.token.store.JwtTokenStore;
import java.util.HashMap;
import java.util.Map;
/**
 * Grants OAuth2 access tokens for the internal "gray-server" client and
 * exposes the name of the currently authenticated Spring Security principal.
 */
public class Oauth2Service {

    private static final Logger log = LoggerFactory.getLogger(Oauth2Service.class);

    private ClientDetailsService clientDetailsService;
    private OAuth2RequestFactory requestFactory;
    private DefaultTokenGranter defaultTokenGranter;
    // NOTE(review): assigned nowhere and never read in the visible code -- confirm it is still needed.
    private JwtTokenStore jwtTokenStore;

    public Oauth2Service(ClientDetailsService clientDetailsService, OAuth2RequestFactory requestFactory, DefaultTokenGranter defaultTokenGranter) {
        this.clientDetailsService = clientDetailsService;
        this.requestFactory = requestFactory;
        this.defaultTokenGranter = defaultTokenGranter;
    }

    /** Grants an access token using the default token granter. */
    public OAuth2AccessToken getAccessToken(TokenRequestInfo tokenRequestInfo) {
        return getAccessToken(tokenRequestInfo, defaultTokenGranter);
    }

    /**
     * Grants an access token for the fixed "gray-server" client using the
     * supplied granter and the request info's user details.
     */
    public OAuth2AccessToken getAccessToken(TokenRequestInfo tokenRequestInfo, TokenGranter tokenGranter) {
        String clientId = "gray-server";
        HashMap<String, String> parameters = new HashMap<>();
        parameters.put("client_id", clientId);
        parameters.put("grant_type", DefaultTokenGranter.GRANT_TYPE);
        Oauth2TokenRequest oauth2TokenRequest = Oauth2TokenRequest.builder()
                .clientId(clientId)
                .parameters(parameters)
                .tokenRequestInfo(tokenRequestInfo)
                .build();
        return createAccessToken(oauth2TokenRequest, tokenGranter);
    }

    /** Builds the framework TokenRequest (wrapping the user details) and delegates to the granter. */
    private OAuth2AccessToken createAccessToken(Oauth2TokenRequest oauth2TokenRequest, TokenGranter tokenGranter) {
        ClientDetails authenticatedClient = clientDetailsService.loadClientByClientId(oauth2TokenRequest.getClientId());
        TokenRequest tokenRequest = requestFactory.createTokenRequest(oauth2TokenRequest.getParameters(), authenticatedClient);
        tokenRequest = new UserTokenRequest(tokenRequest,
                oauth2TokenRequest.getTokenRequestInfo().getUserDetails(),
                (Map) oauth2TokenRequest.getTokenRequestInfo().getExtensionProperties());
        return tokenGranter.grant(tokenRequest.getGrantType(), tokenRequest);
    }

    /**
     * Returns the username of the current Spring Security principal, or null
     * when there is no authentication in the security context.
     */
    public String getUserPrincipal(){
        SecurityContext securityContext = SecurityContextHolder.getContext();
        Authentication authentication = securityContext.getAuthentication();
        String userPrincipal = null;
        if (authentication != null) {
            if (authentication.getPrincipal() instanceof UserDetails) {
                UserDetails springSecurityUser = (UserDetails) authentication.getPrincipal();
                userPrincipal = springSecurityUser.getUsername();
            } else if (authentication.getPrincipal() instanceof String) {
                userPrincipal = (String) authentication.getPrincipal();
            }
        }
        return userPrincipal;
    }
}
| {
"pile_set_name": "Github"
} |
/* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.flowable.rest.service.api.runtime;
import static net.javacrumbs.jsonunit.assertj.JsonAssertions.assertThatJson;
import static org.assertj.core.api.Assertions.assertThat;
import org.apache.http.HttpStatus;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpDelete;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.client.methods.HttpPost;
import org.apache.http.entity.StringEntity;
import org.flowable.engine.runtime.ProcessInstance;
import org.flowable.engine.test.Deployment;
import org.flowable.rest.service.BaseSpringRestTestCase;
import org.flowable.rest.service.api.RestUrls;
import org.junit.Test;
import com.fasterxml.jackson.databind.JsonNode;
import com.fasterxml.jackson.databind.node.ObjectNode;
import net.javacrumbs.jsonunit.core.Option;
/**
 * Test for all REST operations related to identity links on a process instance resource.
 *
 * @author Frederik Heremans
 */
public class ProcessInstanceIdentityLinkResourceTest extends BaseSpringRestTestCase {

    /**
     * Test getting all identity links.
     */
    @Test
    @Deployment(resources = { "org/flowable/rest/service/api/runtime/ProcessInstanceIdentityLinkResourceTest.process.bpmn20.xml" })
    public void testGetIdentityLinks() throws Exception {

        // Test candidate user/groups links + manual added identityLink
        ProcessInstance processInstance = runtimeService.startProcessInstanceByKey("oneTaskProcess");
        runtimeService.addUserIdentityLink(processInstance.getId(), "john", "customType");
        runtimeService.addUserIdentityLink(processInstance.getId(), "paul", "candidate");

        // Execute the request
        CloseableHttpResponse response = executeRequest(
                new HttpGet(SERVER_URL_PREFIX + RestUrls
                        .createRelativeResourceUrl(RestUrls.URL_PROCESS_INSTANCE_IDENTITYLINKS_COLLECTION, processInstance.getId())), HttpStatus.SC_OK);
        JsonNode responseNode = objectMapper.readTree(response.getEntity().getContent());
        closeResponse(response);
        assertThat(responseNode).isNotNull();
        // both links must be present; ordering of the array is not guaranteed
        assertThatJson(responseNode)
                .when(Option.IGNORING_ARRAY_ORDER)
                .isEqualTo("[ {"
                        + " url: '" + SERVER_URL_PREFIX + RestUrls
                        .createRelativeResourceUrl(RestUrls.URL_PROCESS_INSTANCE_IDENTITYLINK, processInstance.getId(), "john", "customType") + "',"
                        + " user: 'john',"
                        + " group: null,"
                        + " type: 'customType'"
                        + "}, {"
                        + " url: '" + SERVER_URL_PREFIX + RestUrls
                        .createRelativeResourceUrl(RestUrls.URL_PROCESS_INSTANCE_IDENTITYLINK, processInstance.getId(), "paul", "candidate") + "',"
                        + " user: 'paul',"
                        + " group: null,"
                        + " type: 'candidate'"
                        + "} ]");
    }

    /**
     * Test creating an identity link.
     */
    @Test
    @Deployment(resources = { "org/flowable/rest/service/api/runtime/ProcessInstanceIdentityLinkResourceTest.process.bpmn20.xml" })
    public void testCreateIdentityLink() throws Exception {
        ProcessInstance processInstance = runtimeService.startProcessInstanceByKey("oneTaskProcess");

        // Add user link
        ObjectNode requestNode = objectMapper.createObjectNode();
        requestNode.put("user", "kermit");
        requestNode.put("type", "myType");

        HttpPost httpPost = new HttpPost(
                SERVER_URL_PREFIX + RestUrls.createRelativeResourceUrl(RestUrls.URL_PROCESS_INSTANCE_IDENTITYLINKS_COLLECTION, processInstance.getId()));
        httpPost.setEntity(new StringEntity(requestNode.toString()));
        CloseableHttpResponse response = executeRequest(httpPost, HttpStatus.SC_CREATED);
        JsonNode responseNode = objectMapper.readTree(response.getEntity().getContent());
        closeResponse(response);
        assertThat(responseNode).isNotNull();
        assertThatJson(responseNode)
                .isEqualTo("{"
                        + "url: '" + SERVER_URL_PREFIX + RestUrls
                        .createRelativeResourceUrl(RestUrls.URL_PROCESS_INSTANCE_IDENTITYLINK, processInstance.getId(), "kermit", "myType") + "',"
                        + "user: 'kermit',"
                        + "type: 'myType',"
                        + "group: null"
                        + "}");

        // Test with unexisting process
        httpPost = new HttpPost(
                SERVER_URL_PREFIX + RestUrls.createRelativeResourceUrl(RestUrls.URL_PROCESS_INSTANCE_IDENTITYLINKS_COLLECTION, "unexistingprocess"));
        httpPost.setEntity(new StringEntity(requestNode.toString()));
        closeResponse(executeRequest(httpPost, HttpStatus.SC_NOT_FOUND));

        // Test with no user
        requestNode = objectMapper.createObjectNode();
        requestNode.put("type", "myType");
        httpPost = new HttpPost(
                SERVER_URL_PREFIX + RestUrls.createRelativeResourceUrl(RestUrls.URL_PROCESS_INSTANCE_IDENTITYLINKS_COLLECTION, processInstance.getId()));
        httpPost.setEntity(new StringEntity(requestNode.toString()));
        closeResponse(executeRequest(httpPost, HttpStatus.SC_BAD_REQUEST));

        // Test with group (which is not supported on processes)
        requestNode = objectMapper.createObjectNode();
        requestNode.put("type", "myType");
        requestNode.put("group", "sales");
        httpPost.setEntity(new StringEntity(requestNode.toString()));
        closeResponse(executeRequest(httpPost, HttpStatus.SC_BAD_REQUEST));

        // Test with no type
        requestNode = objectMapper.createObjectNode();
        requestNode.put("user", "kermit");
        httpPost.setEntity(new StringEntity(requestNode.toString()));
        closeResponse(executeRequest(httpPost, HttpStatus.SC_BAD_REQUEST));
    }

    /**
     * Test getting a single identity link for a process instance.
     */
    @Test
    @Deployment(resources = { "org/flowable/rest/service/api/runtime/ProcessInstanceIdentityLinkResourceTest.process.bpmn20.xml" })
    public void testGetSingleIdentityLink() throws Exception {
        ProcessInstance processInstance = runtimeService.startProcessInstanceByKey("oneTaskProcess");
        runtimeService.addUserIdentityLink(processInstance.getId(), "kermit", "myType");

        CloseableHttpResponse response = executeRequest(
                new HttpGet(SERVER_URL_PREFIX + RestUrls
                        .createRelativeResourceUrl(RestUrls.URL_PROCESS_INSTANCE_IDENTITYLINK, processInstance.getId(), "kermit", "myType")), HttpStatus.SC_OK);
        JsonNode responseNode = objectMapper.readTree(response.getEntity().getContent());
        closeResponse(response);
        assertThat(responseNode).isNotNull();
        assertThatJson(responseNode)
                .isEqualTo("{"
                        + "url: '" + SERVER_URL_PREFIX + RestUrls
                        .createRelativeResourceUrl(RestUrls.URL_PROCESS_INSTANCE_IDENTITYLINK, processInstance.getId(), "kermit", "myType") + "',"
                        + "user: 'kermit',"
                        + "type: 'myType',"
                        + "group: null"
                        + "}");

        // Test with unexisting process
        closeResponse(executeRequest(
                new HttpGet(SERVER_URL_PREFIX + RestUrls
                        .createRelativeResourceUrl(RestUrls.URL_PROCESS_INSTANCE_IDENTITYLINK, RestUrls.SEGMENT_IDENTITYLINKS_FAMILY_USERS, "kermit",
                                "myType")),
                HttpStatus.SC_NOT_FOUND));
    }

    /**
     * Test deleting a single identity link for a process instance.
     */
    @Test
    @Deployment(resources = { "org/flowable/rest/service/api/runtime/ProcessInstanceIdentityLinkResourceTest.process.bpmn20.xml" })
    public void testDeleteSingleIdentityLink() throws Exception {
        ProcessInstance processInstance = runtimeService.startProcessInstanceByKey("oneTaskProcess");
        runtimeService.addUserIdentityLink(processInstance.getId(), "kermit", "myType");

        closeResponse(executeRequest(new HttpDelete(SERVER_URL_PREFIX + RestUrls.createRelativeResourceUrl(RestUrls.URL_PROCESS_INSTANCE_IDENTITYLINK, processInstance.getId(), "kermit", "myType")),
                HttpStatus.SC_NO_CONTENT));

        // Test with unexisting process identity link
        closeResponse(executeRequest(new HttpDelete(SERVER_URL_PREFIX + RestUrls.createRelativeResourceUrl(RestUrls.URL_PROCESS_INSTANCE_IDENTITYLINK, processInstance.getId(), "kermit", "myType")),
                HttpStatus.SC_NOT_FOUND));

        // Test with unexisting process
        closeResponse(executeRequest(
                new HttpDelete(SERVER_URL_PREFIX
                        + RestUrls.createRelativeResourceUrl(RestUrls.URL_PROCESS_INSTANCE_IDENTITYLINK, "unexistingprocess", RestUrls.SEGMENT_IDENTITYLINKS_FAMILY_USERS, "kermit", "myType")),
                HttpStatus.SC_NOT_FOUND));
    }
}
| {
"pile_set_name": "Github"
} |
1
| {
"pile_set_name": "Github"
} |
package com.thinkaurelius.titan.graphdb.query.vertex;
import com.google.common.base.Preconditions;
import com.thinkaurelius.titan.core.TitanRelation;
import com.thinkaurelius.titan.diskstorage.keycolumnvalue.SliceQuery;
import com.thinkaurelius.titan.graphdb.internal.OrderList;
import com.thinkaurelius.titan.graphdb.query.BackendQueryHolder;
import com.thinkaurelius.titan.graphdb.query.BaseQuery;
import com.thinkaurelius.titan.graphdb.query.QueryUtil;
import com.thinkaurelius.titan.graphdb.query.condition.Condition;
import com.thinkaurelius.titan.graphdb.query.condition.FixedCondition;
import com.thinkaurelius.titan.graphdb.query.profile.ProfileObservable;
import com.thinkaurelius.titan.graphdb.query.profile.QueryProfiler;
import org.apache.tinkerpop.gremlin.structure.Direction;
import java.util.ArrayList;
import java.util.List;
/**
 * The base implementation for {@link VertexCentricQuery} which does not yet contain a reference to the
 * base vertex of the query. This query is constructed by {@link BasicVertexCentricQueryBuilder#constructQuery(com.thinkaurelius.titan.graphdb.internal.RelationCategory)}
 * and is then extended by the single- or multi-vertex queries, which add the vertex (or vertices) to the query.
 * <p>
 * This class overrides many methods in {@link com.thinkaurelius.titan.graphdb.query.ElementQuery} - check there
 * for a description.
 *
 * @author Matthias Broecheler ([email protected])
 */
public class BaseVertexCentricQuery extends BaseQuery implements ProfileObservable {
/**
* The condition of this query in QNF
*/
protected final Condition<TitanRelation> condition;
/**
* The individual component {@link SliceQuery} of this query. This query is considered an OR
* of the individual components (possibly filtered by the condition if not fitted).
*/
protected final List<BackendQueryHolder<SliceQuery>> queries;
/**
* The result order of this query (if any)
*/
private final OrderList orders;
/**
* The direction condition of this query. This is duplicated from the condition for efficiency reasons.
*/
protected final Direction direction;
public BaseVertexCentricQuery(Condition<TitanRelation> condition, Direction direction,
List<BackendQueryHolder<SliceQuery>> queries, OrderList orders,
int limit) {
super(limit);
Preconditions.checkArgument(condition != null && queries != null && direction != null);
Preconditions.checkArgument(QueryUtil.isQueryNormalForm(condition) && limit>=0);
this.condition = condition;
this.queries = queries;
this.orders = orders;
this.direction=direction;
}
protected BaseVertexCentricQuery(BaseVertexCentricQuery query) {
this(query.getCondition(), query.getDirection(), query.getQueries(), query.getOrders(), query.getLimit());
}
/**
* Construct an empty query
*/
protected BaseVertexCentricQuery() {
this(new FixedCondition<TitanRelation>(false), Direction.BOTH, new ArrayList<BackendQueryHolder<SliceQuery>>(0),OrderList.NO_ORDER,0);
}
public static BaseVertexCentricQuery emptyQuery() {
return new BaseVertexCentricQuery();
}
public Condition<TitanRelation> getCondition() {
return condition;
}
public OrderList getOrders() {
return orders;
}
public Direction getDirection() {
return direction;
}
protected List<BackendQueryHolder<SliceQuery>> getQueries() {
return queries;
}
public boolean isEmpty() {
return getLimit()<=0;
}
public int numSubQueries() {
return queries.size();
}
/**
* A query is considered 'simple' if it is comprised of just one sub-query and that query
* is fitted (i.e. does not require an in-memory filtering).
* @return
*/
public boolean isSimple() {
return queries.size()==1 && queries.get(0).isFitted() && queries.get(0).isSorted();
}
public BackendQueryHolder<SliceQuery> getSubQuery(int position) {
return queries.get(position);
}
/**
 * Checks whether the given relation satisfies this query's condition.
 *
 * @param relation relation to test
 * @return true if the relation matches
 */
public boolean matches(TitanRelation relation) {
return condition.evaluate(relation);
}
@Override
public String toString() {
    // Render as "[<condition>]" with an optional ":<limit>" suffix.
    final StringBuilder rendered = new StringBuilder();
    rendered.append('[').append(condition).append(']');
    if (hasLimit()) {
        rendered.append(':').append(getLimit());
    }
    return rendered.toString();
}
/**
 * Records this query's characteristics (condition, orders, limit) as
 * annotations on the given profiler and propagates the profiler to all
 * backend sub-queries.
 *
 * @param profiler profiler to annotate
 */
@Override
public void observeWith(QueryProfiler profiler) {
profiler.setAnnotation(QueryProfiler.CONDITION_ANNOTATION,condition);
profiler.setAnnotation(QueryProfiler.ORDERS_ANNOTATION,orders);
// The limit annotation is only meaningful when an explicit limit was set
if (hasLimit()) profiler.setAnnotation(QueryProfiler.LIMIT_ANNOTATION,getLimit());
queries.forEach(bqh -> bqh.observeWith(profiler));
}
}
| {
"pile_set_name": "Github"
} |
/* Generated by RuntimeBrowser
Image: /System/Library/PrivateFrameworks/StoreBookkeeperClient.framework/StoreBookkeeperClient
*/
// Model object describing the saved playback position (bookmark) of a single
// media item, keyed by a ubiquitous identifier within a playback-position
// domain. Copyable and securely codable (header recovered via RuntimeBrowser,
// so semantics beyond the signatures are inferred).
@interface SBCPlaybackPositionEntity : NSObject <NSCopying, NSSecureCoding> {
double _bookmarkTime;
double _bookmarkTimestamp;
long long _foreignDatabaseEntityID;
bool _hasBeenPlayed;
SBCPlaybackPositionDomain * _playbackPositionDomain;
NSString * _ubiquitousIdentifier;
unsigned long long _userPlayCount;
}
// Mutable playback state.
@property (nonatomic) double bookmarkTime;
@property (nonatomic) double bookmarkTimestamp;
@property (nonatomic, readonly) long long foreignDatabaseEntityID;
@property (nonatomic) bool hasBeenPlayed;
// Identity of the entity; fixed at init time (readonly).
@property (nonatomic, readonly) SBCPlaybackPositionDomain *playbackPositionDomain;
@property (nonatomic, readonly) NSString *ubiquitousIdentifier;
@property (nonatomic) unsigned long long userPlayCount;
+ (bool)supportsSecureCoding;
// Factory helpers that derive the ubiquitous identifier from media metadata.
+ (id)ubiquitousIdentifierWithItemTitle:(id)arg1 albumName:(id)arg2 itemArtistName:(id)arg3;
+ (id)ubiquitousIdentifierWithPodcastFeedURL:(id)arg1 feedGUID:(id)arg2;
+ (id)ubiquitousIdentifierWithUniqueStoreID:(long long)arg1;
+ (id)ubiquitousIdentifierWithiTunesUFeedURL:(id)arg1 feedGUID:(id)arg2;
- (void).cxx_destruct;
- (id)SBKUniversalPlaybackPositionMetadata;
- (double)bookmarkTime;
- (double)bookmarkTimestamp;
- (id)copyWithValuesFromSBKUniversalPlaybackPositionMetadata:(id)arg1;
- (id)copyWithZone:(struct _NSZone { }*)arg1;
- (void)encodeWithCoder:(id)arg1;
- (long long)foreignDatabaseEntityID;
- (bool)hasBeenPlayed;
- (id)initWithCoder:(id)arg1;
- (id)initWithPlaybackPositionDomain:(id)arg1 ubiquitousIdentifier:(id)arg2 foreignDatabaseEntityID:(long long)arg3;
- (id)playbackPositionDomain;
- (void)setBookmarkTime:(double)arg1;
- (void)setBookmarkTimestamp:(double)arg1;
- (void)setHasBeenPlayed:(bool)arg1;
- (void)setUserPlayCount:(unsigned long long)arg1;
- (id)ubiquitousIdentifier;
- (unsigned long long)userPlayCount;
@end
| {
"pile_set_name": "Github"
} |
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package ginkgowrapper wraps Ginkgo Fail and Skip functions to panic
// with structured data instead of a constant string.
package ginkgowrapper
import (
"bufio"
"bytes"
"regexp"
"runtime"
"runtime/debug"
"strings"
"github.com/onsi/ginkgo"
)
// FailurePanic is the value that will be panicked from Fail. It carries
// structured location data so callers that recover the panic can report
// exactly where the failure originated.
type FailurePanic struct {
	Message        string // The failure message passed to Fail
	Filename       string // The filename that is the source of the failure
	Line           int    // The line number of the filename that is the source of the failure
	FullStackTrace string // A full stack trace starting at the source of the failure
}

// String makes FailurePanic look like the old Ginkgo panic when printed.
func (FailurePanic) String() string { return ginkgo.GINKGO_PANIC }
// Fail wraps ginkgo.Fail so that it panics with more useful
// information about the failure. This function will panic with a
// FailurePanic.
func Fail(message string, callerSkip ...int) {
	frameSkip := 1
	if len(callerSkip) != 0 {
		frameSkip += callerSkip[0]
	}
	_, sourceFile, sourceLine, _ := runtime.Caller(frameSkip)
	failure := FailurePanic{
		Message:        message,
		Filename:       sourceFile,
		Line:           sourceLine,
		FullStackTrace: pruneStack(frameSkip),
	}
	defer func() {
		// ginkgo.Fail panics (as it always does); swap its panic value
		// for the structured failure record built above.
		if r := recover(); r != nil {
			panic(failure)
		}
	}()
	ginkgo.Fail(message, frameSkip)
}
// SkipPanic is the value that will be panicked from Skip. It carries
// structured location data so callers that recover the panic can report
// exactly where the skip was requested.
type SkipPanic struct {
	Message        string // The message passed to Skip
	Filename       string // The filename that is the source of the skip
	Line           int    // The line number of the filename that is the source of the skip
	FullStackTrace string // A full stack trace starting at the source of the skip
}

// String makes SkipPanic look like the old Ginkgo panic when printed.
func (SkipPanic) String() string { return ginkgo.GINKGO_PANIC }
// Skip wraps ginkgo.Skip so that it panics with more useful
// information about why the test is being skipped. This function will
// panic with a SkipPanic.
func Skip(message string, callerSkip ...int) {
	frameSkip := 1
	if len(callerSkip) != 0 {
		frameSkip += callerSkip[0]
	}
	_, sourceFile, sourceLine, _ := runtime.Caller(frameSkip)
	skipRecord := SkipPanic{
		Message:        message,
		Filename:       sourceFile,
		Line:           sourceLine,
		FullStackTrace: pruneStack(frameSkip),
	}
	defer func() {
		// ginkgo.Skip panics; swap its panic value for the structured record.
		if r := recover(); r != nil {
			panic(skipRecord)
		}
	}()
	ginkgo.Skip(message, frameSkip)
}
// ginkgo adds a lot of test running infrastructure to the stack, so
// we filter those out
var stackSkipPattern = regexp.MustCompile(`onsi/ginkgo`)

// pruneStack returns the current goroutine's stack trace with the top
// `skip` call frames and all onsi/ginkgo frames removed.
//
// debug.Stack output is one goroutine header line followed by two lines
// per frame (function name, then file:line) — hence the 2*skip+1 lines
// dropped at the top and the paired Scan() calls in the loop below.
func pruneStack(skip int) string {
	skip += 2 // one for pruneStack and one for debug.Stack
	stack := debug.Stack()
	scanner := bufio.NewScanner(bytes.NewBuffer(stack))
	var prunedStack []string
	// skip the top of the stack
	for i := 0; i < 2*skip+1; i++ {
		scanner.Scan()
	}
	for scanner.Scan() {
		if stackSkipPattern.Match(scanner.Bytes()) {
			scanner.Scan() // these come in pairs: also drop the file:line half
		} else {
			prunedStack = append(prunedStack, scanner.Text())
			scanner.Scan() // these come in pairs
			prunedStack = append(prunedStack, scanner.Text())
		}
	}
	return strings.Join(prunedStack, "\n")
}
| {
"pile_set_name": "Github"
} |
fileFormatVersion: 2
guid: 5aab17815ee8842d8a7200e46a5f8d90
MonoImporter:
serializedVersion: 2
defaultReferences: []
executionOrder: 0
icon: {instanceID: 0}
userData:
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="utf-8" ?>
<!--
NxBRE - Native syntax rendering of xBusinessRules
Author: David Dossot
-->
<xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:param name="rulesDir"/>
<!-- id generator: copies the element's @id when present; otherwise emits a
     synthetic, document-unique id of the form #dummy<generate-id><position>#
     so that every generated Rule carries a usable id attribute. -->
<xsl:template name="id-gen">
<xsl:param name="id" select="@id"/>
<xsl:choose>
<xsl:when test="$id!=''">
<xsl:attribute name="id"><xsl:value-of select="$id"/></xsl:attribute>
</xsl:when>
<xsl:otherwise>
<xsl:attribute name="id">#dummy<xsl:value-of select="generate-id(.)"/><xsl:value-of select="position()"/>#</xsl:attribute>
</xsl:otherwise>
</xsl:choose>
</xsl:template>
<!-- identity transform -->
<xsl:template match="@*|node()">
<xsl:copy>
<xsl:apply-templates select="@*|node()"/>
</xsl:copy>
</xsl:template>
<!-- transform extended rules in NxBRE rules -->
<xsl:template match="xBusinessRules">
<BusinessRules xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:noNamespaceSchemaLocation="http://nxbre.org/businessRules.xsd">
<xsl:comment> BusinessRules generated by transformXRules.xsl </xsl:comment>
<Rule id="#true#" factory="NxBRE.FlowEngine.Rules.True"/>
<Rule id="#false#" factory="NxBRE.FlowEngine.Rules.False"/>
<xsl:apply-templates/>
</BusinessRules>
</xsl:template>
<!-- logical types to .NET types: maps the friendly type names used in
     xBusinessRules (Integer, Date, ...) to fully qualified .NET type names;
     names with no mapping pass through unchanged. Note that Date, DateTime
     and Time all map to System.DateTime. -->
<xsl:template name="type-conv">
<xsl:param name="type" select="@type"/>
<xsl:choose>
<xsl:when test="$type='Exception'">System.Exception</xsl:when>
<xsl:when test="$type='Boolean'">System.Boolean</xsl:when>
<xsl:when test="$type='Byte'">System.SByte</xsl:when>
<xsl:when test="$type='Short'">System.Int16</xsl:when>
<xsl:when test="$type='Integer'">System.Int32</xsl:when>
<xsl:when test="$type='Long'">System.Int64</xsl:when>
<xsl:when test="$type='Single'">System.Single</xsl:when>
<xsl:when test="$type='Double'">System.Double</xsl:when>
<xsl:when test="$type='Decimal'">System.Decimal</xsl:when>
<xsl:when test="$type='Date'">System.DateTime</xsl:when>
<xsl:when test="$type='DateTime'">System.DateTime</xsl:when>
<xsl:when test="$type='Time'">System.DateTime</xsl:when>
<xsl:when test="$type='String'">System.String</xsl:when>
<xsl:otherwise><xsl:value-of select="$type"/></xsl:otherwise>
</xsl:choose>
</xsl:template>
<xsl:template match="Exception | Boolean | Byte | Short | Integer | Long | Single | Double | Decimal | Date | DateTime | Time | String">
<Rule id="{@id}" factory="NxBRE.FlowEngine.Rules.Value">
<Parameter name="Value" value="{@value}"/>
<Parameter name="Type">
<xsl:attribute name="value">
<xsl:call-template name="type-conv">
<xsl:with-param name="type" select="name()"/>
</xsl:call-template>
</xsl:attribute>
</Parameter>
</Rule>
</xsl:template>
<xsl:template match="Parameter[@name='Type']">
<Parameter name="Type">
<xsl:attribute name="value">
<xsl:call-template name="type-conv">
<xsl:with-param name="type" select="@value"/>
</xsl:call-template>
</xsl:attribute>
</Parameter>
</xsl:template>
<xsl:template match="Parameter[@name!='Type' and @valueId!='']">
<Parameter name="{@name}" ruleValue="{@valueId}"/>
</xsl:template>
<xsl:template match="Argument">
<Parameter name="Argument{position()-1}">
<xsl:if test="@type != ''">
<xsl:attribute name="type">
<xsl:call-template name="type-conv">
<xsl:with-param name="type" select="@type"/>
</xsl:call-template>
</xsl:attribute>
</xsl:if>
<xsl:if test="@valueId != ''">
<xsl:attribute name="ruleValue">
<xsl:value-of select="@valueId"/>
</xsl:attribute>
</xsl:if>
<xsl:copy-of select="@value"/>
</Parameter>
</xsl:template>
<!-- conditions -->
<xsl:template match="And">
<Condition type="AND">
<xsl:apply-templates/>
</Condition>
</xsl:template>
<xsl:template match="Or">
<Condition type="OR">
<xsl:apply-templates/>
</Condition>
</xsl:template>
<xsl:template match="Not">
<Condition type="NOT">
<xsl:apply-templates/>
</Condition>
</xsl:template>
<!-- operators -->
<xsl:template match="Equals | GreaterThan | GreaterThanEqualTo | InstanceOf | LessThan | LessThanEqualTo | NotEquals">
<Compare leftId="{@leftId}" operator="NxBRE.FlowEngine.Rules.{name()}" rightId="{@rightId}">
<xsl:apply-templates/>
</Compare>
</xsl:template>
<xsl:template match="Matches">
<Compare leftId="{@valueId}" operator="NxBRE.FlowEngine.Rules.{name()}" rightId="{@patternId}">
<xsl:apply-templates/>
</Compare>
</xsl:template>
<xsl:template name="in-values">
<xsl:param name="idList" select="@idList"/>
<xsl:choose>
<xsl:when test="contains($idList, ',')">
<Compare leftId="{@valueId}" operator="NxBRE.FlowEngine.Rules.Equals" rightId="{substring-before($idList,',')}">
<xsl:apply-templates/>
</Compare>
<xsl:call-template name="in-values">
<xsl:with-param name="idList" select="substring-after($idList,',')"/>
</xsl:call-template>
</xsl:when>
<xsl:otherwise>
<Compare leftId="{@valueId}" operator="NxBRE.FlowEngine.Rules.Equals" rightId="{$idList}">
<xsl:apply-templates/>
</Compare>
</xsl:otherwise>
</xsl:choose>
</xsl:template>
<xsl:template match="In">
<Condition type="OR">
<xsl:call-template name="in-values"/>
</Condition>
</xsl:template>
<xsl:template match="IsAsserted">
<Compare leftId="{@valueId}" operator="NxBRE.FlowEngine.Rules.InstanceOf" rightId="{@valueId}"/>
</xsl:template>
<xsl:template match="IsTrue">
<Compare leftId="{@valueId}" operator="NxBRE.FlowEngine.Rules.Equals" rightId="#true#">
<xsl:apply-templates/>
</Compare>
</xsl:template>
<xsl:template match="IsFalse">
<Compare leftId="{@valueId}" operator="NxBRE.FlowEngine.Rules.Equals" rightId="#false#">
<xsl:apply-templates/>
</Compare>
</xsl:template>
<xsl:template match="Between">
<Condition type="AND">
<Compare leftId="{@leftId}" rightId="{@valueId}">
<xsl:attribute name="operator">
<xsl:choose>
<xsl:when test="@excludeLeft='true'">NxBRE.FlowEngine.Rules.LessThan</xsl:when>
<xsl:otherwise>NxBRE.FlowEngine.Rules.LessThanEqualTo</xsl:otherwise>
</xsl:choose>
</xsl:attribute>
<xsl:apply-templates/>
</Compare>
<Compare leftId="{@valueId}" rightId="{@rightId}">
<xsl:attribute name="operator">
<xsl:choose>
<xsl:when test="@excludeRight='true'">NxBRE.FlowEngine.Rules.LessThan</xsl:when>
<xsl:otherwise>NxBRE.FlowEngine.Rules.LessThanEqualTo</xsl:otherwise>
</xsl:choose>
</xsl:attribute>
<xsl:apply-templates/>
</Compare>
</Condition>
</xsl:template>
<xsl:template match="ForEach">
<ForEach id="{@id}" ruleValue="{@valueId}">
<xsl:apply-templates/>
</ForEach>
</xsl:template>
<!-- object related rules -->
<xsl:template match="Assert | Evaluate | Modify">
<Rule id="{@id}">
<xsl:if test="name()='Assert'">
<xsl:attribute name="factory">NxBRE.FlowEngine.Rules.Value</xsl:attribute>
</xsl:if>
<Parameter name="Type">
<xsl:attribute name="value">
<xsl:call-template name="type-conv"/>
</xsl:attribute>
</Parameter>
<xsl:choose>
<xsl:when test="@valueId!=''">
<Parameter name="Value" ruleValue="{@valueId}"/>
</xsl:when>
<xsl:when test="@value!=''">
<Parameter name="Value" value="{@value}"/>
</xsl:when>
</xsl:choose>
<xsl:apply-templates/>
</Rule>
</xsl:template>
<xsl:template match="ObjectLookup">
<Rule factory="NxBRE.FlowEngine.Rules.ObjectLookup">
<xsl:call-template name="id-gen"/>
<xsl:if test="@objectId != ''">
<Parameter name="ObjectId" value="{@objectId}"/>
</xsl:if>
<xsl:if test="@member != ''">
<Parameter name="Member" value="{@member}"/>
</xsl:if>
<xsl:if test="@type != ''">
<Parameter name="Type" value="{@type}"/>
</xsl:if>
<xsl:apply-templates/>
</Rule>
</xsl:template>
<!-- set related rules -->
<xsl:template match="InvokeSet">
<InvokeSet>
<xsl:if test="@id != ''">
<xsl:attribute name="id">
<xsl:value-of select="@id"/>
</xsl:attribute>
</xsl:if>
<xsl:if test="@valueId != ''">
<xsl:attribute name="ruleValue">
<xsl:value-of select="@valueId"/>
</xsl:attribute>
</xsl:if>
</InvokeSet>
</xsl:template>
<!-- Increment -->
<xsl:template match="Increment[@step]">
<Rule id="{@id}" factory="NxBRE.FlowEngine.Rules.Increment">
<Parameter name="Increment" value="{@step}"/>
</Rule>
</xsl:template>
<xsl:template match="Increment[count(@step)=0]">
<Rule factory="NxBRE.FlowEngine.Rules.IncrementInit">
<xsl:call-template name="id-gen">
<xsl:with-param name="id"/>
</xsl:call-template>
<Parameter name="Id" value="{@id}"/>
<xsl:if test="@valueId!=''">
<Parameter name="Init" ruleValue="{@valueId}"/>
</xsl:if>
<xsl:if test="@value!=''">
<Parameter name="Init" value="{@value}"/>
</xsl:if>
</Rule>
</xsl:template>
<!-- other rules -->
<xsl:template match="True">
<Rule id="{@id}" factory="NxBRE.FlowEngine.Rules.True"/>
</xsl:template>
<xsl:template match="False">
<Rule id="{@id}" factory="NxBRE.FlowEngine.Rules.False"/>
</xsl:template>
<xsl:template match="ThrowException | ThrowFatalException">
<Rule factory="NxBRE.FlowEngine.Rules.{substring-after(name(),'Throw')}">
<xsl:call-template name="id-gen"/>
<xsl:if test="@value!=''">
<Parameter name="Message" value="{@value}"/>
</xsl:if>
<xsl:if test="@valueId!=''">
<Parameter name="Message" ruleValue="{@valueId}"/>
</xsl:if>
</Rule>
</xsl:template>
</xsl:stylesheet>
| {
"pile_set_name": "Github"
} |
package tests.wurstscript.tests;
import org.testng.annotations.Test;
public class TupleTests extends WurstScriptTest {
@Test
public void simple() {
// Tuples have value semantics: assigning v to u copies the components,
// so the later write to u.y must not be visible through v (and the
// earlier write to v.x must not affect u's copy of the original value).
testAssertOkLines(true,
"package test",
" native testSuccess()",
" tuple vec(real x, real y, real z)",
" init",
" vec v = vec(1,2,3)",
" v.x = 4",
" vec u = v",
" u.y = 5",
" if v.x == 4 and v.y == 2 and u.y == 5",
" testSuccess()",
"endpackage"
);
}
@Test
public void swap() {
testAssertOkLines(true,
"package test",
" native testSuccess()",
" tuple pair(real x, real y)",
" init",
" pair p = pair(1,2)",
" p = pair(p.y, p.x)",
" if p.x == 2 and p.y == 1",
" testSuccess()",
"endpackage"
);
}
@Test
public void parameter() {
testAssertOkLines(true,
"package test",
" native testSuccess()",
" tuple vec(real x, real y, real z)",
" function sum(vec v) returns real",
" return v.x + v.y + v.z",
" init",
" let v = vec(4,5,6)",
" if sum(v) == 15",
" testSuccess()",
"endpackage"
);
}
@Test
public void returnValue() {
testAssertOkLines(true,
"package test",
" native testSuccess()",
" tuple vec(real x, real y, real z)",
" function blub(real x, real y) returns vec",
" return vec(x, y, 0)",
" init",
" let v = blub(4,5)",
" if v.x == 4 and v.y == 5 and v.z == 0",
" testSuccess()",
"endpackage"
);
}
@Test
public void vecs() {
testAssertOkLines(true,
"package test",
" native testSuccess()",
" tuple vec(real x, real y, real z)",
" function vec.plus(vec other) returns vec",
" return vec(this.x + other.x, this.y + other.y, this.z + other.z)",
" init",
" let v = vec(1,2,3).plus(vec(4,5,6))",
" if v.x == 5 and v.y == 7 and v.z == 9",
" testSuccess()",
"endpackage"
);
}
@Test
public void vecs2() {
testAssertOkLines(true,
"package test",
" native testSuccess()",
" tuple vec(real x, real y, real z)",
" function vec.plus(vec other) returns vec",
" if this.x > 0",
" return vec(this.x + other.x, this.y + other.y, this.z + other.z)",
" else",
" return vec(this.x + other.x, this.y + other.y, this.z + other.z)",
" init",
" if vec(1,2,3).plus(vec(4,5,6)).y == 7",
" testSuccess()",
"endpackage"
);
}
@Test
public void bugtest() {
testAssertOkLines(false,
"package test",
" tuple tup(real x)",
" function foo(tup mytup)",
" real y = mytup.x",
"endpackage"
);
}
@Test
public void vecs4() {
testAssertOkLines(false,
"package test",
" native testSuccess()",
" native Sin(real x) returns real",
" native Cos(real x) returns real",
" tuple vec3(real x, real y, real z)",
" public function polarProjection3d( vec3 pos, real distance, real angleGround, real angleAir ) returns vec3",
" real x = pos.x + distance * Cos(angleGround) * Sin(angleAir)",
" real y = pos.y + distance * Sin(angleGround) * Sin(angleAir)",
" real z = pos.z + distance * Cos(angleAir) ",
" return vec3(x,y,z)",
"endpackage"
);
}
@Test
public void vecsTrim() {
testAssertOkLines(false,
"package test",
" native testSuccess()",
" tuple vec3(real x, real y, real z)",
" public function vec3.trim( real value ) returns vec3",
" vec3 result = this",
" if result.x > -value and result.x < value",
" result.x = 0.",
" if result.y > -value and result.y < value",
" result.y = 0.",
" if result.z > -value and result.z < value",
" result.z = 0.",
" return result",
" vec3 array vs",
" init",
" vs[0] = vec3(3,15,4)",
" if 3 > 2",
" vs[0].trim(5)",
"endpackage"
);
}
@Test
public void vecsTrim2() {
testAssertOkLines(false,
"package test",
" native testSuccess()",
" tuple vec3(real x, real y, real z)",
" public function vec3.trim( real value ) returns vec3",
" vec3 result = this",
" if result.x > -value and result.x < value",
" result.x = 0.",
" if result.y > -value and result.y < value",
" result.y = 0.",
" if result.z > -value and result.z < value",
" result.z = 0.",
" return result",
" vec3 array vs",
" function foo(int x) returns int",
" if x > 0",
" return x",
" else",
" return -x",
" init",
" vs[0] = vec3(3,15,4)",
" vs[foo(3)].trim(foo(4)*1.)",
"endpackage"
);
}
@Test
public void inClass() {
testAssertOkLines(true,
"package test",
" native testSuccess()",
" tuple vec(real x, real y, real z)",
" class A",
" vec pos",
" construct()",
" pos = vec(1,1,1)",
" function foo()",
" pos.x = 3",
" init",
" A a = new A()",
" a.foo()",
" if a.pos.x == 3",
" testSuccess()",
"endpackage"
);
}
@Test
public void tupleInTuple() {
testAssertOkLines(true,
"package test",
" native testSuccess()",
" tuple vec(real x, real y)",
" tuple line(vec start, vec end_)",
" init",
" let l = line(vec(1,2), vec(3,4))",
" if l.end_.x == 3",
" testSuccess()",
"endpackage"
);
}
@Test
public void tupleInTuple2() {
testAssertOkLines(true,
"package test",
" native testSuccess()",
" tuple vec(real x, real y)",
" tuple line(vec start, vec end_)",
" function bla(vec v) returns real",
" return v.x + v.y",
" init",
" let l = line(vec(1,2), vec(3,4))",
" if bla(l.end_) == 7",
" testSuccess()",
"endpackage"
);
}
@Test
public void testEquals() {
testAssertOkLines(true,
"package test",
" native testSuccess()",
" tuple tup(int a, int b, int c)",
" init",
" let a = tup(1,2,3)",
" let b = tup(1,2,3)",
" let c = tup(1,1,3)",
" if a == b and (not b == c)",
" testSuccess()",
"endpackage"
);
}
@Test
public void testUnequals() {
testAssertOkLines(true,
"package test",
" native testSuccess()",
" tuple tup(int a, int b, int c)",
" init",
" let a = tup(1,2,3)",
" let b = tup(1,2,3)",
" let c = tup(1,1,3)",
" if a != c and (not a != b)",
" testSuccess()",
"endpackage"
);
}
@Test
public void testSingletuple() {
testAssertOkLines(true,
"package test",
" native testSuccess()",
" tuple tup(int a)",
" function foo(boolean b) returns tup",
" if b",
" return tup(2)",
" return tup(3)",
" init",
" let a = foo(true)",
" if a.a == 2",
" testSuccess()",
"endpackage"
);
}
@Test
public void testSingletuple2() {
testAssertOkLines(true,
"package test",
" native testSuccess()",
" tuple tup(int a)",
" function foo(tup t) returns tup",
" if t.a > 5",
" return tup(2)",
" return tup(3)",
" tup array x",
" init",
" x[0] = tup(8)",
" let a = foo(x[0])",
" if a.a == 2",
" testSuccess()",
"endpackage"
);
}
@Test
public void tupleInTuple3() {
testAssertOkLines(true,
"package test",
" native testSuccess()",
" native println(string s)",
" @extern native I2S(int i) returns string",
" tuple vec(int x, int y)",
" tuple line(vec a, vec b)",
" line array l",
" init",
" l[2] = line(vec(1,2),vec(3,4))",
" string s = \"abc\"",
" s += I2S(l[2].a.x)",
" s += I2S(l[2].a.y)",
" s += I2S(l[2].b.x)",
" s += I2S(l[2].b.y)",
" if s == \"abc1234\"",
" testSuccess()",
"endpackage"
);
}
@Test
public void tupleInTuple4() {
testAssertOkLines(true,
"package test",
"native testSuccess()",
"tuple vec2(int x, int y)",
"tuple rectangle(vec2 min, vec2 max)",
"init",
" rectangle r = rectangle(vec2(1,2), vec2(3,4))",
" r.min.x = 5",
" if r.min.x == 5",
" testSuccess()"
);
}
@Test
public void tupleInTuple5() {
testAssertOkLines(true,
"package test",
"native testSuccess()",
"tuple vec2(int x, int y)",
"tuple rectangle(vec2 min, vec2 max)",
"init",
" rectangle r = rectangle(vec2(1,2), vec2(3,4))",
" r.min = vec2(5,2)",
" if r.min.x == 5",
" testSuccess()"
);
}
@Test
public void tupleReturn() {
testAssertOkLines(true,
"package test",
"native testSuccess()",
"@extern native GetRandomReal(real x, real y) returns real",
"tuple vec2(real x, real y)",
"function randomPoint() returns vec2",
" return vec2(GetRandomReal(0, 1), GetRandomReal(3, 4))",
"function a() returns vec2",
" return randomPoint()",
"init",
" let v = a()",
" testSuccess()"
);
}
@Test
public void tupleReturn2() {
testAssertOkLines(true,
"package test",
"native testSuccess()",
"@extern native GetRandomReal(real x, real y) returns real",
"tuple vec2(real x, real y)",
"tuple t(vec2 v, real r)",
"function randomPoint() returns vec2",
" return vec2(GetRandomReal(0, 1), GetRandomReal(3, 4))",
"function a() returns t",
" return t(randomPoint(), 1)",
"init",
" let v = a()",
" testSuccess()"
);
}
@Test
public void nestedTuple() { // #713
testAssertOkLines(true,
"package test",
"native testSuccess()",
"native println(string s)",
"@extern native I2S(int x) returns string",
"function print(int x)",
" println(I2S(x))",
"@extern native GetRandomInt(int x, int y) returns int",
"tuple parent(child a, int index)",
"function newParent(int i) returns parent",
" return parent(child(0, 0, 0, 0), i)",
"tuple child(int a, int b, int c, int d)",
"var putCount = 0",
"function child.put(int i, int num) returns child",
" putCount += 1",
" if i == 0",
" return child(num, this.b, this.c, this.d)",
" else if i == 1",
" return child(this.a, num, this.b, this.d)",
" else if i == 2",
" return child(this.a, this.b, num, this.d)",
" else",
" return child(this.a, this.b, this.b, num)",
"function randomOperations(parent t, int val) returns parent",
" var some = t",
" some.a = some.a.put(t.index, val)",
" return some",
"init",
" var t = randomOperations(newParent(GetRandomInt(0, 3)), 100)",
" print(t.a.a)",
" print(t.a.b)",
" print(t.a.c)",
" print(t.a.d)",
" print(putCount)",
" if putCount == 1",
" testSuccess()"
);
}
@Test
public void nestedTuple2() { // #713
testAssertOkLines(true,
"package test",
"native testSuccess()",
"native println(string s)",
"@extern native I2S(int x) returns string",
"function print(int x)",
" println(I2S(x))",
"tuple parent(child a, child b)",
"tuple child(int x, int y, int z)",
"init",
" var t = parent(child(1,2,3), child(4,5,6))",
" print(t.a.x)",
" print(t.a.y)",
" print(t.a.z)",
" print(t.b.x)",
" print(t.b.y)",
" print(t.b.z)",
" t.a = child(7,8,9)",
" print(t.a.x)",
" print(t.a.y)",
" print(t.a.z)",
" if t.a == child(7, 8, 9)",
" testSuccess()"
);
}
@Test
public void tupleArrayInClass() { // see #572
testAssertOkLines(true,
"package test",
"native testSuccess()",
"class C",
" angle array[2] ang",
"tuple angle(real rad)",
"init",
" let c = new C",
" c.ang[0] = angle(4.)",
" c.ang[1] = angle(2.)",
" if c.ang[0].rad == 4. and c.ang[1].rad == 2",
" testSuccess()"
);
}
@Test
public void tupleArrayInClass2() { // see #572
testAssertOkLines(true,
"package test",
"native testSuccess()",
"@extern native R2S(real r) returns string",
"native println(string s)",
"class C",
" vec array[5] v",
"tuple vec(real x, real y, real z)",
"init",
" let c = new C",
" c.v[0] = vec(1,2,3)",
" c.v[1].x = 5",
" c.v[1].y = 6",
" c.v[1].z = 7",
" println(R2S(c.v[0].x))",
" println(R2S(c.v[0].z))",
" println(R2S(c.v[1].y))",
" if c.v[0].x == 1 and c.v[0].z == 3 and c.v[1].y == 6",
" testSuccess()"
);
}
@Test
public void tupleArraySideEvaluationOrder() {
testAssertOkLines(true,
"package test",
"native testSuccess()",
"int x = 1",
"tuple p(int x, int y)",
"p array ar",
"function foo() returns int",
" x = x * 2",
" return 1",
"function bar() returns int",
" x = x + 1",
" return 1",
"init",
" ar[foo()] = p(bar(), 7)",
" if x == 3 and ar[1].x == 1 and ar[1].y == 7",
" testSuccess()"
);
}
@Test
public void nullStringInTuple() {
testAssertOkLines(true,
"package test",
"native testSuccess()",
"tuple p(int x, string y)",
"constant c = p(1, null)",
"init",
" if c.x == 1 and c.y == null",
" testSuccess()"
);
}
@Test
public void nestedTupleArray() {
testAssertOkLines(true,
"package test",
"native testSuccess()",
"tuple vec2(int x, int y)",
"tuple clicks(vec2 left, vec2 middle, vec2 right)",
"",
"clicks array lastClicks",
"function getId(int i) returns int",
" return 0",
"public function int.getLastClick() returns vec2",
" return lastClicks[getId(this)].middle",
"init",
" lastClicks[0] = clicks(vec2(1,2), vec2(3,4), vec2(5, 6))",
" let p = 0",
" if p.getLastClick() == vec2(3,4)",
" testSuccess()"
);
}
}
| {
"pile_set_name": "Github"
} |
/**
* \file lzma/subblock.h
* \brief Subblock filter
*/
/*
* Author: Lasse Collin
*
* This file has been put into the public domain.
* You can do whatever you want with this file.
*
* See ../lzma.h for information about liblzma as a whole.
*/
#ifndef LZMA_H_INTERNAL
# error Never include this file directly. Use <lzma.h> instead.
#endif
/**
* \brief Filter ID
*
* Filter ID of the Subblock filter. This is used as lzma_filter.id.
*/
#define LZMA_FILTER_SUBBLOCK LZMA_VLI_C(0x01)
/**
 * \brief Subfilter mode
 *
 * State machine used by the application and the Subblock coder to
 * start and stop an optional Subfilter. See
 * lzma_options_subblock.subfilter_mode for the full handshake protocol.
 */
typedef enum {
	LZMA_SUBFILTER_NONE,
		/**<
		 * No Subfilter is in use.
		 */

	LZMA_SUBFILTER_SET,
		/**<
		 * New Subfilter has been requested to be initialized.
		 */

	LZMA_SUBFILTER_RUN,
		/**<
		 * Subfilter is active.
		 */

	LZMA_SUBFILTER_FINISH
		/**<
		 * Subfilter has been requested to be finished.
		 */
} lzma_subfilter_mode;
/**
* \brief Options for the Subblock filter
*
* Specifying options for the Subblock filter is optional: if the pointer
* options is NULL, no subfilters are allowed and the default value is used
* for subblock_data_size.
*/
typedef struct {
/* Options for encoder and decoder */
/**
* \brief Allowing subfilters
*
* If this is true, subfilters are allowed.
*
* In the encoder, if this is set to false, subfilter_mode and
* subfilter_options are completely ignored.
*/
lzma_bool allow_subfilters;
/* Options for encoder only */
/**
* \brief Alignment
*
* The Subblock filter encapsulates the input data into Subblocks.
* Each Subblock has a header which takes a few bytes of space.
* When the output of the Subblock encoder is fed to another filter
* that takes advantage of the alignment of the input data (e.g. LZMA),
* the Subblock filter can add padding to keep the actual data parts
* in the Subblocks aligned correctly.
*
* The alignment should be a positive integer. Subblock filter will
* add enough padding between Subblocks so that this is true for
* every payload byte:
* input_offset % alignment == output_offset % alignment
*
* The Subblock filter assumes that the first output byte will be
* written to a position in the output stream that is properly
* aligned. This requirement is automatically met when the start
* offset of the Stream or Block is correctly told to Block or
* Stream encoder.
*/
uint32_t alignment;
# define LZMA_SUBBLOCK_ALIGNMENT_MIN 1
# define LZMA_SUBBLOCK_ALIGNMENT_MAX 32
# define LZMA_SUBBLOCK_ALIGNMENT_DEFAULT 4
/**
* \brief Size of the Subblock Data part of each Subblock
*
* This value is re-read every time a new Subblock is started.
*
* Bigger values
* - save a few bytes of space;
* - increase latency in the encoder (but no effect for decoding);
* - decrease memory locality (increased cache pollution) in the
* encoder (no effect in decoding).
*/
uint32_t subblock_data_size;
# define LZMA_SUBBLOCK_DATA_SIZE_MIN 1
# define LZMA_SUBBLOCK_DATA_SIZE_MAX (UINT32_C(1) << 28)
# define LZMA_SUBBLOCK_DATA_SIZE_DEFAULT 4096
/**
* \brief Run-length encoder remote control
*
* The Subblock filter has an internal run-length encoder (RLE). It
* can be useful when the data includes byte sequences that repeat
* very many times. The RLE can be used also when a Subfilter is
* in use; the RLE will be applied to the output of the Subfilter.
*
* Note that in contrast to traditional RLE, this RLE is intended to
* be used only when there's a lot of data to be repeated. If the
* input data has e.g. 500 bytes of NULs now and then, this RLE
* is probably useless, because plain LZMA should provide better
* results.
*
* Due to above reasons, it was decided to keep the implementation
* of the RLE very simple. When the rle variable is non-zero,
* subblock_data_size must be a multiple of rle. Once the Subblock
* encoder has got subblock_data_size bytes of input, it will check
* if the whole buffer of the last subblock_data_size can be
* represented with repeats of chunks having size of rle bytes.
*
* If there are consecutive identical buffers of subblock_data_size
* bytes, they will be encoded using a single repeat entry if
* possible.
*
* If need arises, more advanced RLE can be implemented later
* without breaking API or ABI.
*/
uint32_t rle;
# define LZMA_SUBBLOCK_RLE_OFF 0
# define LZMA_SUBBLOCK_RLE_MIN 1
# define LZMA_SUBBLOCK_RLE_MAX 256
/**
* \brief Subfilter remote control
*
* When the Subblock filter is initialized, this variable must be
* LZMA_SUBFILTER_NONE or LZMA_SUBFILTER_SET.
*
* When subfilter_mode is LZMA_SUBFILTER_NONE, the application may
* put Subfilter options to subfilter_options structure, and then
* set subfilter_mode to LZMA_SUBFILTER_SET. No new input data will
* be read until the Subfilter has been enabled. Once the Subfilter
* has been enabled, liblzma will set subfilter_mode to
* LZMA_SUBFILTER_RUN.
*
* When subfilter_mode is LZMA_SUBFILTER_RUN, the application may
* set subfilter_mode to LZMA_SUBFILTER_FINISH. All the input
* currently available will be encoded before unsetting the
* Subfilter. Application must not change the amount of available
* input until the Subfilter has finished. Once the Subfilter has
* finished, liblzma will set subfilter_mode to LZMA_SUBFILTER_NONE.
*
* If the intent is to have Subfilter enabled to the very end of
* the data, it is not needed to separately disable Subfilter with
* LZMA_SUBFILTER_FINISH. Using LZMA_FINISH as the second argument
* of lzma_code() will make the Subblock encoder to disable the
* Subfilter once all the data has been ran through the Subfilter.
*
* After the first call with LZMA_SYNC_FLUSH or LZMA_FINISH, the
* application must not change subfilter_mode until LZMA_STREAM_END.
* Setting LZMA_SUBFILTER_SET/LZMA_SUBFILTER_FINISH and
* LZMA_SYNC_FLUSH/LZMA_FINISH _at the same time_ is fine.
*
* \note This variable is ignored if allow_subfilters is false.
*/
lzma_subfilter_mode subfilter_mode;
/**
* \brief Subfilter and its options
*
* When no Subfilter is used, the data is copied as is into Subblocks.
* Setting a Subfilter allows encoding some parts of the data with
* an additional filter. It is possible to use many different Subfilters
* in the same Block, although only one can be used at once.
*
* \note This variable is ignored if allow_subfilters is false.
*/
lzma_filter subfilter_options;
} lzma_options_subblock;
| {
"pile_set_name": "Github"
} |
[config]
command = record
args = -g dwarf -- kill >/dev/null 2>&1
[event:base-record]
sample_type=12583
exclude_callchain_user=1
sample_stack_user=8192
# TODO different for each arch, no support for that now
sample_regs_user=*
| {
"pile_set_name": "Github"
} |
set(TARGET Proto_MRA)
include(${CMAKE_SOURCE_DIR}/cmake/icons.cmake) | {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="utf-8"?>
<LinearLayout xmlns:android="http://schemas.android.com/apk/res/android"
android:orientation="vertical"
android:layout_width="fill_parent"
android:layout_height="fill_parent"
>
<Button
android:id="@+id/MyButton"
android:layout_width="fill_parent"
android:layout_height="wrap_content"
android:text="@string/Hello"
/>
</LinearLayout>
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
<key>CFBundleDevelopmentRegion</key>
<string>English</string>
<key>CFBundleExecutable</key>
<string>${EXECUTABLE_NAME}</string>
<key>CFBundleIdentifier</key>
<string>com.yourcompany.${PRODUCT_NAME:identifier}</string>
<key>CFBundleInfoDictionaryVersion</key>
<string>6.0</string>
<key>CFBundlePackageType</key>
<string>BNDL</string>
<key>CFBundleSignature</key>
<string>????</string>
<key>CFBundleVersion</key>
<string>1.0</string>
</dict>
</plist>
| {
"pile_set_name": "Github"
} |
import numpy as np
from pymoo.algorithms.nsga2 import RankAndCrowdingSurvival
from pymoo.algorithms.so_local_search import LocalSearch
from pymoo.model.individual import Individual
from pymoo.model.population import Population
from pymoo.util.display import SingleObjectiveDisplay
from pymoo.util.nds.non_dominated_sorting import NonDominatedSorting
from pymoo.util.normalization import normalize, denormalize
def norm_bounds(pop, problem):
    """Return the per-individual lower and upper bounds of `pop`, rescaled to
    [0, 1] with respect to the problem's variable bounds."""
    lower, upper = problem.xl, problem.xu
    normed_lower = normalize(pop.get("xl"), lower, upper)
    normed_upper = normalize(pop.get("xu"), lower, upper)
    return normed_lower, normed_upper
def update_bounds(ind, xl, xu, k, delta):
    """Attach to `ind` copies of the bounds `xl`/`xu` in which dimension `k`
    has been shrunk to the interval [X[k] - delta, X[k] + delta].

    The input arrays are not modified; `ind.set` stores the fresh copies.
    """
    center = ind.X[k]
    new_lower = np.array(xl, copy=True)
    new_lower[k] = center - delta
    ind.set("xl", new_lower)
    new_upper = np.array(xu, copy=True)
    new_upper[k] = center + delta
    ind.set("xu", new_upper)
class DIRECT(LocalSearch):
    """DIRECT-style (DIviding RECTangles) search.

    Each Individual carries its own hyper-rectangle, stored in its "xl"/"xu"
    attributes. Every iteration selects the potentially optimal rectangles and
    trisects each of them along its largest dimension, producing two offspring
    per division.
    """

    def __init__(self,
                 eps=1e-2,
                 penalty=0.1,
                 n_max_candidates=10,
                 display=SingleObjectiveDisplay(),
                 **kwargs):
        # penalty: weight of the constraint violation (CV) added to the
        #          normalized objective values in _potential_optimal.
        # n_max_candidates: cap on the number of rectangles divided per iteration.
        # eps: stored but not referenced anywhere in this class -- presumably
        #      kept for API compatibility or subclass use; TODO confirm.
        # NOTE(review): display=SingleObjectiveDisplay() is a mutable default
        # argument shared across instances -- verify this is intended.
        super().__init__(display=display, **kwargs)
        self.eps = eps
        self.penalty = penalty
        self.n_max_candidates = n_max_candidates

    def setup(self, problem, **kwargs):
        # Start from a single individual at the center of the design space,
        # whose rectangle spans the whole feasible box (depth 0).
        super().setup(problem, **kwargs)
        xl, xu = problem.bounds()
        X = denormalize(0.5 * np.ones(problem.n_var), xl, xu)
        x0 = Individual(X=X)
        x0.set("xl", xl)
        x0.set("xu", xu)
        x0.set("depth", 0)
        self.x0 = x0

    def _initialize(self, **kwargs):
        super()._initialize(**kwargs)

    def _potential_optimal(self):
        # Returns the subset of the population that is non-dominated with
        # respect to (rectangle size, penalized objective value).
        pop = self.pop
        if len(pop) == 1:
            return pop

        # get the intervals of each individual
        _F, _CV, xl, xu = pop.get("F", "CV", "xl", "xu")
        nF = normalize(_F)
        F = nF + self.penalty * _CV

        # get the length of the interval of each solution
        nxl, nxu = norm_bounds(pop, self.problem)
        length = (nxu - nxl) / 2
        val = length.max(axis=1)

        # (a) non-dominated with respect to interval
        obj = np.column_stack([-val, F])
        I = NonDominatedSorting().do(obj, only_non_dominated_front=True)
        candidates, F, xl, xu, val = pop[I], F[I], xl[I], xu[I], val[I]

        # import matplotlib.pyplot as plt
        # plt.scatter(obj[:, 0], obj[:, 1])
        # plt.scatter(obj[I, 0], obj[I, 1], color="red")
        # plt.show()

        if len(candidates) == 1:
            return candidates
        else:
            if len(candidates) > self.n_max_candidates:
                # NOTE(review): survival is applied to `pop`, not to
                # `candidates` -- confirm this is intended and not a slip.
                candidates = RankAndCrowdingSurvival().do(self.problem, pop, self.n_max_candidates)
            return candidates

    def _next(self):
        # the offspring population to finally evaluate and attach to the population
        off = Population()

        # find the potential optimal solution in the current population
        potential_optimal = self._potential_optimal()

        # for each of those solutions execute the division move
        for current in potential_optimal:
            # find the largest dimension the solution has not been evaluated yet
            # (norm_bounds is also called here with a single Individual; it works
            # as long as Individual.get behaves like Population.get -- TODO confirm)
            nxl, nxu = norm_bounds(current, self.problem)
            k = np.argmax(nxu - nxl)

            # the delta value to be used to get left and right - this is one sixth of the range
            xl, xu = current.get("xl"), current.get("xu")
            delta = (xu[k] - xl[k]) / 6

            # create the left individual
            left_x = np.copy(current.X)
            left_x[k] = xl[k] + delta
            left = Individual(X=left_x)

            # create the right individual
            right_x = np.copy(current.X)
            right_x[k] = xu[k] - delta
            right = Individual(X=right_x)

            # update the boundaries for all the points accordingly
            for ind in [current, left, right]:
                update_bounds(ind, xl, xu, k, delta)

            # create the offspring population, evaluate and attach to current population
            _off = Population.create(left, right)
            _off.set("depth", current.get("depth") + 1)

            off = Population.merge(off, _off)

        # evaluate the offsprings
        self.evaluator.eval(self.problem, off, algorithm=self)

        # add the offsprings to the population
        self.pop = Population.merge(self.pop, off)
| {
"pile_set_name": "Github"
} |
using System;
using System.IO;
using System.Text;
using static ATL.ChannelsArrangements;
using System.Collections.Generic;
using System.Globalization;
namespace ATL.AudioData.IO
{
    /// <summary>
    /// Class for Audible Formats 2 to 4 files manipulation (extensions : .AA)
    ///
    /// Implementation notes
    ///
    ///   - Only the editing of existing zones has been tested, not the adding of new zones (e.g. tagging a tagless AA, adding a picture to a pictureless AA)
    ///   due to the lack of empty test files
    ///
    /// </summary>
    class AA : MetaDataIO, IAudioDataIO
    {
        // Big-endian int32 read right after the file size; identifies a valid AA file
        public const int AA_MAGIC_NUMBER = 1469084982;

        // Section IDs used in the file's table of contents
        public const int TOC_HEADER_TERMINATOR = 1;
        public const int TOC_CONTENT_TAGS = 2;
        public const int TOC_AUDIO = 10;
        public const int TOC_COVER_ART = 11;

        // Known values of the "codec" content tag
        public const string CODEC_MP332 = "mp332";
        public const string CODEC_ACELP85 = "acelp85";
        public const string CODEC_ACELP16 = "acelp16";

        // Zone identifiers; ZONE_TAGS / ZONE_IMAGE are the string forms of the TOC section IDs
        public const string ZONE_TOC = "toc";
        public const string ZONE_TAGS = "2";
        public const string ZONE_IMAGE = "11";

        // Mapping between MP4 frame codes and ATL frame codes
        private static Dictionary<string, byte> frameMapping = new Dictionary<string, byte>() {
            { "title", TagData.TAG_FIELD_TITLE },
            { "parent_title", TagData.TAG_FIELD_ALBUM },
            { "narrator", TagData.TAG_FIELD_ARTIST },
            { "description", TagData.TAG_FIELD_COMMENT },
            { "pubdate", TagData.TAG_FIELD_PUBLISHING_DATE},
            { "provider", TagData.TAG_FIELD_PUBLISHER},
            { "author", TagData.TAG_FIELD_COMPOSER },
            { "long_description", TagData.TAG_FIELD_GENERAL_DESCRIPTION },
            { "copyright", TagData.TAG_FIELD_COPYRIGHT },
        };

        private long audioSize;   // size (bytes) of the TOC_AUDIO section, set by readHeader
        private string codec;     // value of the "codec" content tag (see CODEC_* constants)
        private AudioDataManager.SizeInfo sizeInfo;
        private readonly string fileName;

        // Table of contents : section ID -> (offset, size)
        private Dictionary<int, Tuple<uint, uint>> toc;


        // ---------- INFORMATIVE INTERFACE IMPLEMENTATIONS & MANDATORY OVERRIDES

        // IAudioDataIO
        public bool IsVBR
        {
            get { return false; }
        }
        public int CodecFamily
        {
            get { return AudioDataIOFactory.CF_LOSSY; }
        }
        // Nominal bit rate divided by 8 -- i.e. expressed in KBytes/s
        public double BitRate
        {
            get
            {
                switch (codec)
                {
                    case CODEC_MP332:
                        return 32 / 8.0;
                    case CODEC_ACELP16:
                        return 16 / 8.0;
                    case CODEC_ACELP85:
                        return 8.5 / 8.0;
                    default:
                        return 1;
                }
            }
        }
        public double Duration
        {
            get { return getDuration(); }
        }
        public int SampleRate
        {
            get
            {
                switch (codec)
                {
                    case CODEC_MP332:
                        return 22050;
                    case CODEC_ACELP16:
                        return 16000;
                    case CODEC_ACELP85:
                        return 8500;
                    default:
                        return 1;
                }
            }
        }
        public string FileName
        {
            get { return fileName; }
        }
        public bool IsMetaSupported(int metaDataType)
        {
            return (metaDataType == MetaDataIOFactory.TAG_NATIVE);
        }
        public ChannelsArrangement ChannelsArrangement
        {
            get { return MONO; }
        }

        // MetaDataIO
        protected override int getDefaultTagOffset()
        {
            return TO_BUILTIN;
        }
        protected override int getImplementedTagType()
        {
            return MetaDataIOFactory.TAG_NATIVE;
        }
        protected override byte getFrameMapping(string zone, string ID, byte tagVersion)
        {
            byte supportedMetaId = 255; // 255 = unsupported / unmapped field
            if (frameMapping.ContainsKey(ID)) supportedMetaId = frameMapping[ID];
            return supportedMetaId;
        }
        // All multi-byte integers in the AA format are big-endian
        protected override bool isLittleEndian
        {
            get { return false; }
        }


        // ---------- CONSTRUCTORS & INITIALIZERS

        protected void resetData()
        {
            codec = "";
            audioSize = 0;
        }

        public AA(string fileName)
        {
            this.fileName = fileName;
            resetData();
        }


        // ********************** Private functions & procedures *********************

        // Calculate duration time, in seconds (audioSize in bytes / BitRate in KBytes/s * 1000)
        private double getDuration()
        {
            if (0 == BitRate)
                return 0;
            else
                return audioSize / (BitRate * 1000);
        }

        // Read header data : file size, magic number and the table of contents.
        // Populates `toc`, `audioSize` and registers zones/indexes on structureHelper.
        private void readHeader(BinaryReader source)
        {
            uint fileSize = StreamUtils.DecodeBEUInt32(source.ReadBytes(4));
            int magicNumber = StreamUtils.DecodeBEInt32(source.ReadBytes(4));
            if (magicNumber != AA_MAGIC_NUMBER) return; // Not an AA file; leave toc empty
            tagExists = true;

            int tocSize = StreamUtils.DecodeBEInt32(source.ReadBytes(4));
            source.BaseStream.Seek(4, SeekOrigin.Current); // Even FFMPeg doesn't know what this integer is

            // The table of contents describes the layout of the file as triples of integers (<section>, <offset>, <length>)
            toc = new Dictionary<int, Tuple<uint, uint>>();
            for (int i = 0; i < tocSize; i++)
            {
                int section = StreamUtils.DecodeBEInt32(source.ReadBytes(4));
                uint tocEntryOffset = StreamUtils.DecodeBEUInt32(source.ReadBytes(4));
                uint tocEntrySize = StreamUtils.DecodeBEUInt32(source.ReadBytes(4));
                Tuple<uint, uint> data = new Tuple<uint, uint>(tocEntryOffset, tocEntrySize);
                toc[section] = data;
                structureHelper.AddZone(tocEntryOffset, (int)tocEntrySize, section.ToString());
                structureHelper.AddIndex(source.BaseStream.Position - 8, tocEntryOffset, false, section.ToString());
                if (TOC_AUDIO == section)
                {
                    audioSize = tocEntrySize;
                }
                if (TOC_CONTENT_TAGS == section)
                {
                    // Both the TOC entry size and the global file size must be patched on rewrite
                    structureHelper.AddSize(source.BaseStream.Position - 4, tocEntrySize, section.ToString());
                    structureHelper.AddSize(0, fileSize, section.ToString());
                }
                if (TOC_COVER_ART == section)
                {
                    structureHelper.AddSize(source.BaseStream.Position - 4, tocEntrySize, section.ToString());
                    // NOTE(review): this offset index was already added above for every
                    // section -- confirm the duplicate registration is intended
                    structureHelper.AddIndex(source.BaseStream.Position - 8, tocEntryOffset, false, section.ToString());
                    structureHelper.AddSize(0, fileSize, section.ToString());
                }
            }
        }

        // Read the content tags section : a count followed by (key, value) UTF-8 string pairs.
        // Also captures the "codec" tag into the `codec` field.
        private void readTags(BinaryReader source, long offset, long size, ReadTagParams readTagParams)
        {
            source.BaseStream.Seek(offset, SeekOrigin.Begin);
            int nbTags = StreamUtils.DecodeBEInt32(source.ReadBytes(4));
            for (int i = 0; i < nbTags; i++)
            {
                source.BaseStream.Seek(1, SeekOrigin.Current); // No idea what this byte is
                int keyLength = StreamUtils.DecodeBEInt32(source.ReadBytes(4));
                int valueLength = StreamUtils.DecodeBEInt32(source.ReadBytes(4));
                string key = Encoding.UTF8.GetString(source.ReadBytes(keyLength));
                string value = Encoding.UTF8.GetString(source.ReadBytes(valueLength)).Trim();
                SetMetaField(key, value, readTagParams.ReadAllMetaFrames);
                if ("codec".Equals(key)) codec = value;
            }
        }

        // Read the cover art section : picture size, then an absolute offset to the picture data.
        private void readCover(BinaryReader source, long offset, PictureInfo.PIC_TYPE pictureType)
        {
            source.BaseStream.Seek(offset, SeekOrigin.Begin);
            int pictureSize = StreamUtils.DecodeBEInt32(source.ReadBytes(4));
            int picOffset = StreamUtils.DecodeBEInt32(source.ReadBytes(4));
            structureHelper.AddIndex(source.BaseStream.Position - 4, (uint)picOffset, false, ZONE_IMAGE);
            source.BaseStream.Seek(picOffset, SeekOrigin.Begin);
            PictureInfo picInfo = PictureInfo.fromBinaryData(source.BaseStream, pictureSize, pictureType, getImplementedTagType(), TOC_COVER_ART);
            tagData.Pictures.Add(picInfo);
        }

        // Walk the audio section : each chapter is (size, offset) followed by its audio data.
        // Chapter start/end times are derived from chapter sizes and the bit rate.
        private void readChapters(BinaryReader source, long offset, long size)
        {
            source.BaseStream.Seek(offset, SeekOrigin.Begin);
            if (null == tagData.Chapters) tagData.Chapters = new List<ChapterInfo>(); else tagData.Chapters.Clear();
            double cumulatedDuration = 0;
            int idx = 1;
            while (source.BaseStream.Position < offset + size)
            {
                uint chapterSize = StreamUtils.DecodeBEUInt32(source.ReadBytes(4));
                uint chapterOffset = StreamUtils.DecodeBEUInt32(source.ReadBytes(4));
                structureHelper.AddZone(chapterOffset, (int)chapterSize, "chp" + idx);
                structureHelper.AddIndex(source.BaseStream.Position - 4, chapterOffset, false, "chp" + idx);
                ChapterInfo chapter = new ChapterInfo();
                chapter.Title = "Chapter " + idx++; // Chapters have no title metadata in the AA format
                chapter.StartTime = (uint)Math.Round(cumulatedDuration);
                cumulatedDuration += chapterSize / (BitRate * 1000); // seconds, same formula as getDuration
                chapter.EndTime = (uint)Math.Round(cumulatedDuration);
                tagData.Chapters.Add(chapter);
                source.BaseStream.Seek(chapterSize, SeekOrigin.Current); // Skip the chapter's audio data
            }
        }

        // Read data from file
        public bool Read(BinaryReader source, AudioDataManager.SizeInfo sizeInfo, MetaDataIO.ReadTagParams readTagParams)
        {
            this.sizeInfo = sizeInfo;
            return read(source, readTagParams);
        }

        protected override bool read(BinaryReader source, ReadTagParams readTagParams)
        {
            bool result = true;
            ResetData();
            readHeader(source);
            if (toc.ContainsKey(TOC_CONTENT_TAGS))
            {
                // Tags must be read before chapters so that `codec` (hence BitRate) is known
                readTags(source, toc[TOC_CONTENT_TAGS].Item1, toc[TOC_CONTENT_TAGS].Item2, readTagParams);
            }
            if (toc.ContainsKey(TOC_COVER_ART))
            {
                if (readTagParams.ReadPictures)
                    readCover(source, toc[TOC_COVER_ART].Item1, PictureInfo.PIC_TYPE.Generic);
                else
                    addPictureToken(PictureInfo.PIC_TYPE.Generic);
            }
            // NOTE(review): unconditional access -- throws if the file has no
            // TOC_AUDIO section (or if readHeader bailed on a bad magic number);
            // confirm whether a ContainsKey guard is needed here.
            readChapters(source, toc[TOC_AUDIO].Item1, toc[TOC_AUDIO].Item2);
            return result;
        }

        // NOTE(review): `new` hides (does not override) the base implementation;
        // it is only applied when called through this type -- confirm intended.
        protected new string formatBeforeWriting(byte frameType, TagData tag, IDictionary<byte, string> map)
        {
            string result = base.formatBeforeWriting(frameType, tag, map);
            // Convert to expected date format (e.g. "01-JAN-2020")
            if (TagData.TAG_FIELD_PUBLISHING_DATE == frameType)
            {
                DateTime date = DateTime.Parse(result);
                result = date.ToString("dd-MMM-yyyy", CultureInfo.CreateSpecificCulture("en-US")).ToUpper();
            }
            return result;
        }

        protected override int write(TagData tag, BinaryWriter w, string zone)
        {
            int result = -1; // Default : leave as is
            if (zone.Equals(ZONE_TAGS))
            {
                long nbTagsOffset = w.BaseStream.Position;
                w.Write(0); // Number of tags; will be rewritten at the end of the method
                // Mapped textual fields
                IDictionary<byte, string> map = tag.ToMap();
                foreach (byte frameType in map.Keys)
                {
                    if (map[frameType].Length > 0) // No frame with empty value
                    {
                        foreach (string s in frameMapping.Keys)
                        {
                            if (frameType == frameMapping[s])
                            {
                                string value = formatBeforeWriting(frameType, tag, map);
                                writeTagField(w, s, value);
                                result++;
                                break;
                            }
                        }
                    }
                }
                // Other textual fields
                foreach (MetaFieldInfo fieldInfo in tag.AdditionalFields)
                {
                    if ((fieldInfo.TagType.Equals(MetaDataIOFactory.TAG_ANY) || fieldInfo.TagType.Equals(getImplementedTagType())) && !fieldInfo.MarkedForDeletion)
                    {
                        writeTagField(w, fieldInfo.NativeFieldCode, fieldInfo.Value);
                        result++;
                    }
                }
                // NOTE(review): result starts at -1, so the count written below is
                // (number of fields written - 1) -- confirm against readTags, which
                // interprets this value as the number of tags.
                w.BaseStream.Seek(nbTagsOffset, SeekOrigin.Begin);
                w.Write(StreamUtils.EncodeBEInt32(result)); // Number of tags
            }
            if (zone.Equals(ZONE_IMAGE))
            {
                result = 0;
                foreach (PictureInfo picInfo in tag.Pictures)
                {
                    // Picture has either to be supported, or to come from the right tag standard
                    bool doWritePicture = !picInfo.PicType.Equals(PictureInfo.PIC_TYPE.Unsupported);
                    if (!doWritePicture) doWritePicture = (getImplementedTagType() == picInfo.TagType);
                    // It also has not to be marked for deletion
                    doWritePicture = doWritePicture && (!picInfo.MarkedForDeletion);
                    if (doWritePicture)
                    {
                        writePictureFrame(w, picInfo.PictureData);
                        return 1; // Stop here; there can only be one picture in an AA file
                    }
                }
            }
            return result;
        }

        // Write one (key, value) pair in the layout expected by readTags
        private void writeTagField(BinaryWriter w, string key, string value)
        {
            w.Write('\0'); // Unknown byte; always zero
            byte[] keyB = Encoding.UTF8.GetBytes(key);
            byte[] valueB = Encoding.UTF8.GetBytes(value);
            w.Write(StreamUtils.EncodeBEInt32(keyB.Length)); // Key length
            w.Write(StreamUtils.EncodeBEInt32(valueB.Length)); // Value length
            w.Write(keyB);
            w.Write(valueB);
        }

        private void writePictureFrame(BinaryWriter w, byte[] pictureData)
        {
            w.Write(StreamUtils.EncodeBEInt32(pictureData.Length)); // Pic size
            w.Write(0); // Pic data absolute offset; to be rewritten later (all-zero placeholder, so endianness is irrelevant here)
            w.Write(pictureData);
        }
    }
} | {
"pile_set_name": "Github"
} |
/* Copyright (C) 1995-1998 Eric Young ([email protected])
* All rights reserved.
*
* This package is an SSL implementation written
* by Eric Young ([email protected]).
* The implementation was written so as to conform with Netscapes SSL.
*
* This library is free for commercial and non-commercial use as long as
* the following conditions are aheared to. The following conditions
* apply to all code found in this distribution, be it the RC4, RSA,
* lhash, DES, etc., code; not just the SSL code. The SSL documentation
* included with this distribution is covered by the same copyright terms
* except that the holder is Tim Hudson ([email protected]).
*
* Copyright remains Eric Young's, and as such any Copyright notices in
* the code are not to be removed.
* If this package is used in a product, Eric Young should be given attribution
* as the author of the parts of the library used.
* This can be in the form of a textual message at program startup or
* in documentation (online or textual) provided with the package.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. All advertising materials mentioning features or use of this software
* must display the following acknowledgement:
* "This product includes cryptographic software written by
* Eric Young ([email protected])"
* The word 'cryptographic' can be left out if the rouines from the library
* being used are not cryptographic related :-).
* 4. If you include any Windows specific code (or a derivative thereof) from
* the apps directory (application code) you must include an acknowledgement:
* "This product includes software written by Tim Hudson ([email protected])"
*
* THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*
* The licence and distribution terms for any publically available version or
* derivative of this code cannot be changed. i.e. this code cannot simply be
* copied and put under another distribution licence
* [including the GNU Public Licence.] */
#include <openssl/bio.h>
#include <limits.h>
#include <string.h>
#include "../internal.h"
/* hexdump_ctx contains the state of a hexdump. */
struct hexdump_ctx {
  BIO *bio;             /* destination of the dump; not owned. */
  char right_chars[18]; /* the contents of the right-hand side, ASCII dump. */
  unsigned used;        /* number of bytes in the current line. */
  size_t n;             /* number of bytes total. */
  unsigned indent;      /* number of spaces prefixed to each line. */
};
/* hexbyte writes the two lowercase hex digits of |b| to |out[0]| and |out[1]|.
 * No NUL terminator is written. */
static void hexbyte(char *out, uint8_t b) {
  static const char kHexDigits[] = "0123456789abcdef";
  const unsigned hi = (b >> 4) & 0x0f;
  const unsigned lo = b & 0x0f;
  out[0] = kHexDigits[hi];
  out[1] = kHexDigits[lo];
}
/* to_char maps |b| to itself when it is a printable ASCII character and to '.'
 * otherwise; used for the right-hand ASCII column of the dump. */
static char to_char(uint8_t b) {
  const int printable = (b >= 32 && b <= 126);
  return printable ? (char)b : '.';
}
/* hexdump_write adds |len| bytes of |data| to the current hex dump described by
 * |ctx|. It returns one on success and zero if a |BIO_write| fails. */
static int hexdump_write(struct hexdump_ctx *ctx, const uint8_t *data,
                         size_t len) {
  char buf[10];
  unsigned l;
  /* Output lines look like:
   * 00000010  2e 2f 30 31 32 33 34 35  36 37 38 ... 3c 3d // |./0123456789:;<=|
   * ^ offset                          ^ extra space           ^ ASCII of line
   */
  for (size_t i = 0; i < len; i++) {
    if (ctx->used == 0) {
      /* The beginning of a line: indent, then the 8-digit hex offset of the
       * line followed by two spaces. */
      BIO_indent(ctx->bio, ctx->indent, UINT_MAX);
      hexbyte(&buf[0], ctx->n >> 24);
      hexbyte(&buf[2], ctx->n >> 16);
      hexbyte(&buf[4], ctx->n >> 8);
      hexbyte(&buf[6], ctx->n);
      buf[8] = buf[9] = ' ';
      if (BIO_write(ctx->bio, buf, 10) < 0) {
        return 0;
      }
    }
    /* Two hex digits for the current byte, plus a trailing space. */
    hexbyte(buf, data[i]);
    buf[2] = ' ';
    l = 3;
    if (ctx->used == 7) {
      /* There's an additional space after the 8th byte. */
      buf[3] = ' ';
      l = 4;
    } else if (ctx->used == 15) {
      /* At the end of the line there's an extra space and the bar for the
       * right column. */
      buf[3] = ' ';
      buf[4] = '|';
      l = 5;
    }
    if (BIO_write(ctx->bio, buf, l) < 0) {
      return 0;
    }
    ctx->right_chars[ctx->used] = to_char(data[i]);
    ctx->used++;
    ctx->n++;
    if (ctx->used == 16) {
      /* Line full: flush the ASCII column and start a fresh line. */
      ctx->right_chars[16] = '|';
      ctx->right_chars[17] = '\n';
      if (BIO_write(ctx->bio, ctx->right_chars, sizeof(ctx->right_chars)) < 0) {
        return 0;
      }
      ctx->used = 0;
    }
  }
  return 1;
}
/* finish flushes any buffered data in |ctx|, padding the final partial line so
 * the ASCII column stays aligned. Returns one on success, zero on write error. */
static int finish(struct hexdump_ctx *ctx) {
  /* See the comments in |hexdump| for the details of this format. */
  const unsigned n_bytes = ctx->used;
  unsigned l;
  char buf[5];
  if (n_bytes == 0) {
    /* The last line was complete and has already been flushed. */
    return 1;
  }
  /* Spaces stand in for the missing hex digits; the final slot may carry the
   * '|' that opens the ASCII column. */
  OPENSSL_memset(buf, ' ', 4);
  buf[4] = '|';
  for (; ctx->used < 16; ctx->used++) {
    /* Widths mirror hexdump_write: 3 per byte, 4 after the 8th, 5 at the end. */
    l = 3;
    if (ctx->used == 7) {
      l = 4;
    } else if (ctx->used == 15) {
      l = 5;
    }
    if (BIO_write(ctx->bio, buf, l) < 0) {
      return 0;
    }
  }
  ctx->right_chars[n_bytes] = '|';
  ctx->right_chars[n_bytes + 1] = '\n';
  if (BIO_write(ctx->bio, ctx->right_chars, n_bytes + 2) < 0) {
    return 0;
  }
  return 1;
}
/* BIO_hexdump writes a hex dump of |len| bytes from |data| to |bio|, with each
 * line prefixed by |indent| spaces. It returns one on success and zero on
 * error. */
int BIO_hexdump(BIO *bio, const uint8_t *data, size_t len, unsigned indent) {
  struct hexdump_ctx ctx;
  OPENSSL_memset(&ctx, 0, sizeof(ctx));
  ctx.bio = bio;
  ctx.indent = indent;
  return hexdump_write(&ctx, data, len) && finish(&ctx);
}
| {
"pile_set_name": "Github"
} |
/*
* Copyright 2020 ThoughtWorks, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import {bind} from "classnames/bind";
import {ApiResult} from "helpers/api_request_builder";
import {MithrilViewComponent} from "jsx/mithril-component";
import m from "mithril";
import {AgentConfigState} from "models/agents/agents";
import {EnvironmentsService, ResourcesService} from "models/agents/agents_crud";
import {StaticAgentsVM} from "models/agents/agents_vm";
import {ButtonGroup} from "views/components/buttons";
import * as Buttons from "views/components/buttons";
import {FlashMessageModelWithTimeout} from "views/components/flash_message";
import {SearchField} from "views/components/forms/input_fields";
import {KeyValuePair} from "views/components/key_value_pair";
import {EnvironmentsDropdownButton} from "views/pages/agents/environment_dropdown_button";
import {ResourcesDropdownButton} from "views/pages/agents/resources_dropdown_button";
import style from "./index.scss";
const classnames = bind(style);
// Attributes consumed by AgentHeaderPanel.
interface AgentHeaderPanelAttrs {
  agentsVM: StaticAgentsVM;          // view-model holding agent list, selection and filter state
  onEnable: (e: MouseEvent) => void;  // bulk-enable the selected agents
  onDisable: (e: MouseEvent) => void; // bulk-disable the selected agents
  onDelete: (e: MouseEvent) => void;  // bulk-delete the selected agents
  flashMessage: FlashMessageModelWithTimeout;
  updateEnvironments: (environmentsToAdd: string[], environmentsToRemove: string[]) => Promise<ApiResult<string>>;
  updateResources: (resourcesToAdd: string[], resourcesToRemove: string[]) => Promise<ApiResult<string>>;
}
// Toolbar above the static-agents table: bulk enable/disable/delete buttons,
// environment/resource dropdowns, per-state agent counts, and a text filter.
export class AgentHeaderPanel extends MithrilViewComponent<AgentHeaderPanelAttrs> {
  view(vnode: m.Vnode<AgentHeaderPanelAttrs, this>) {
    const agentsVM = vnode.attrs.agentsVM;
    // Bulk-action buttons are disabled until at least one agent is selected.
    return (<div class={style.headerPanel}>
      <div class={style.leftContainer}>
        <ButtonGroup>
          <Buttons.Primary data-test-id="delete-agents"
                           disabled={AgentHeaderPanel.isNoneSelected(agentsVM)}
                           onclick={vnode.attrs.onDelete}>DELETE</Buttons.Primary>
          <Buttons.Primary data-test-id="enable-agents"
                           disabled={AgentHeaderPanel.isNoneSelected(agentsVM)}
                           onclick={vnode.attrs.onEnable}>ENABLE</Buttons.Primary>
          <Buttons.Primary data-test-id="disable-agents"
                           disabled={AgentHeaderPanel.isNoneSelected(agentsVM)}
                           onclick={vnode.attrs.onDisable}>DISABLE</Buttons.Primary>
          <EnvironmentsDropdownButton show={agentsVM.showEnvironments}
                                      agentsVM={agentsVM}
                                      updateEnvironments={vnode.attrs.updateEnvironments}
                                      flashMessage={vnode.attrs.flashMessage}
                                      service={new EnvironmentsService()}/>
          <ResourcesDropdownButton show={agentsVM.showResources}
                                   agentsVM={agentsVM}
                                   updateResources={vnode.attrs.updateResources}
                                   flashMessage={vnode.attrs.flashMessage}
                                   service={new ResourcesService()}/>
        </ButtonGroup>
        <KeyValuePair inline={true} data={new Map(
          [
            ["Total", this.span(agentsVM.list().length)],
            ["Pending", this.span(agentsVM.filterBy(AgentConfigState.Pending).length)],
            ["Enabled", this.span(agentsVM.filterBy(AgentConfigState.Enabled).length, style.enabled)],
            ["Disabled", this.span(agentsVM.filterBy(AgentConfigState.Disabled).length, style.disabled)]
          ])
        }/>
      </div>
      <SearchField placeholder="Filter Agents" label="Search for agents" property={agentsVM.filterText}/>
    </div>);
  }

  // True when no agent row is currently selected.
  private static isNoneSelected(agentsVM: StaticAgentsVM) {
    return agentsVM.selectedAgentsUUID().length === 0;
  }

  // Renders a count badge, optionally colored by agent state.
  private span(count: number, className: string = ""): m.Children {
    return <span class={classnames(style.count, className)}>{count}</span>;
  }
}
| {
"pile_set_name": "Github"
} |
(ob-sml :fetcher github :repo "swannodette/ob-sml")
| {
"pile_set_name": "Github"
} |
package com.bitlove.fetlife.model.resource.post
import com.bitlove.fetlife.FetLifeApplication
import com.bitlove.fetlife.getLoggedInUser
import com.bitlove.fetlife.logic.dataholder.CardViewDataHolder
import com.bitlove.fetlife.model.dataobject.SyncObject
import com.bitlove.fetlife.model.dataobject.entity.content.FavoriteEntity
import com.bitlove.fetlife.model.dataobject.entity.content.ReactionEntity
import com.bitlove.fetlife.model.dataobject.wrapper.Content
import com.bitlove.fetlife.model.dataobject.wrapper.Favoritable
import com.bitlove.fetlife.model.dataobject.wrapper.Member
import com.bitlove.fetlife.model.dataobject.wrapper.Reaction
import com.bitlove.fetlife.model.db.FetLifeContentDatabase
import com.bitlove.fetlife.model.network.job.post.PostReactionJob
/**
 * Toggles the favorite state of a [Favoritable] (a [Content] or a [Member]) in
 * the local database: deletes the existing [FavoriteEntity] if one is present,
 * otherwise inserts a new one pointing at the target's local id.
 *
 * Purely local: [shouldSync] returns false and [syncWithNetwork] is a no-op,
 * so no network synchronization is triggered by this resource.
 */
class SetFavoriteResource(favoritable: Favoritable, userId: String? = getLoggedInUser()?.getLocalId()) : PostResource<Favoritable>(favoritable, userId) {

    override fun saveToDb(contentDb: FetLifeContentDatabase, favoritable: Favoritable) {
        // Favorite the wrapped child when present, the object itself otherwise.
        // NOTE(review): the safe call on the non-null parameter is redundant.
        val data = favoritable?.getChild()?: favoritable
        val currentFavoriteEntity = favoritable.getFavoriteEntity()
        if (currentFavoriteEntity != null) {
            // Already favorited -> remove the favorite (toggle off)
            contentDb.favoriteDao().delete(currentFavoriteEntity)
        } else {
            val favoriteEntity = FavoriteEntity()
            when(data) {
                is Content -> {
                    favoriteEntity.contentId = data.getLocalId()
                }
                is Member -> {
                    favoriteEntity.memberId = data.getLocalId()
                }
                // Unsupported target type: store nothing
                else -> return
            }
            contentDb.favoriteDao().insertOrUpdate(favoriteEntity)
        }
    }

    override fun shouldSync(favoritable: Favoritable): Boolean {
        return false
    }

    override fun syncWithNetwork(favoritable: Favoritable) {
    }
}
"pile_set_name": "Github"
} |
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package leaderelection
import (
"net/http"
"sync"
"time"
)
// HealthzAdaptor associates the /healthz endpoint with the LeaderElection object.
// It helps deal with the /healthz endpoint being set up prior to the LeaderElection.
// This contains the code needed to act as an adaptor between the leader
// election code the health check code. It allows us to provide health
// status about the leader election. Most specifically about if the leader
// has failed to renew without exiting the process. In that case we should
// report not healthy and rely on the kubelet to take down the process.
type HealthzAdaptor struct {
	pointerLock sync.Mutex     // guards le
	le          *LeaderElector // nil until SetLeaderElection is called
	timeout     time.Duration  // grace period beyond lease expiry before Check reports an error
}
// Name returns the name of the health check we are implementing.
// This is the identifier under which the check appears in /healthz output.
func (l *HealthzAdaptor) Name() string {
	return "leaderElection"
}
// Check is called by the healthz endpoint handler.
// It fails (returns an error) if we own the lease but had not been able to renew it.
func (l *HealthzAdaptor) Check(req *http.Request) error {
	l.pointerLock.Lock()
	defer l.pointerLock.Unlock()
	if l.le == nil {
		// Leader election not wired up yet (SetLeaderElection has not been
		// called): report healthy rather than failing the endpoint early.
		return nil
	}
	return l.le.Check(l.timeout)
}
// SetLeaderElection ties a leader election object to a HealthzAdaptor.
// It is safe to call after the /healthz endpoint is already serving; the
// mutex keeps the update atomic with respect to concurrent Check calls.
func (l *HealthzAdaptor) SetLeaderElection(le *LeaderElector) {
	l.pointerLock.Lock()
	defer l.pointerLock.Unlock()
	l.le = le
}
// NewLeaderHealthzAdaptor creates a basic healthz adaptor to monitor a leader election.
// timeout determines the time beyond the lease expiry to be allowed for timeout.
// checks within the timeout period after the lease expires will still return healthy.
func NewLeaderHealthzAdaptor(timeout time.Duration) *HealthzAdaptor {
	// The LeaderElector itself is attached later via SetLeaderElection.
	result := &HealthzAdaptor{
		timeout: timeout,
	}
	return result
}
| {
"pile_set_name": "Github"
} |
<?php
namespace Minds\Controllers\api\v2\newsfeed;
use Minds\Api\Factory;
use Minds\Core;
use Minds\Entities;
use Minds\Entities\Activity;
use Minds\Interfaces;
class pin implements Interfaces\Api
{
    /**
     * Not supported for this endpoint.
     */
    public function get($pages)
    {
        return Factory::response([]);
    }

    /**
     * Pins an activity to its owner's channel, or to its group when the
     * activity was posted to a group the user moderates or owns.
     *
     * @param array $pages - $pages[0] holds the Activity GUID
     */
    public function post($pages)
    {
        return $this->setPinnedState($pages, true);
    }

    /**
     * Not supported for this endpoint.
     */
    public function put($pages)
    {
        return Factory::response([]);
    }

    /**
     * Unpins an activity from its owner's channel or from its group.
     *
     * @param array $pages - $pages[0] holds the Activity GUID
     */
    public function delete($pages)
    {
        return $this->setPinnedState($pages, false);
    }

    /**
     * Shared implementation for post() (pin) and delete() (unpin); the two
     * differ only in which mutator is applied.
     *
     * @param array $pages
     * @param bool $pin - true to pin, false to unpin
     */
    private function setPinnedState($pages, $pin)
    {
        if (!isset($pages[0])) {
            return Factory::response(['status' => 'error', 'message' => 'You must send an Activity GUID']);
        }

        /** @var Activity $activity */
        $activity = Entities\Factory::build($pages[0]);
        $user = Core\Session::getLoggedinUser();

        if ($activity->container_guid != $user->guid) {
            // Activity lives in a group: only moderators and owners may manage pins
            $group = Entities\Factory::build($activity->container_guid);

            if ($group->isModerator($user) || $group->isOwner($user)) {
                if ($pin) {
                    $group->addPinned($activity->guid);
                } else {
                    $group->removePinned($activity->guid);
                }
                $group->save();
            } else {
                return Factory::response([
                    'status' => 'error',
                    // fixed: message previously read "do not not have permission"
                    'message' => 'You do not have permission to pin to this group',
                ]);
            }
        } else {
            // Activity is on the user's own channel
            if ($pin) {
                $user->addPinned($activity->guid);
            } else {
                $user->removePinned($activity->guid);
            }
            $user->save();
        }

        return Factory::response([]);
    }
}
| {
"pile_set_name": "Github"
} |
/*
* Copyright (C) 2013 Company 100 Inc.
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "config.h"
#if USE(SOUP)
#include "SharedBuffer.h"
#include <libsoup/soup.h>
namespace WebCore {

// Wraps an existing SoupBuffer into a SharedBuffer without copying the data.
// Takes ownership of |soupBuffer| (it is adopted into a GUniquePtr).
SharedBuffer::SharedBuffer(SoupBuffer* soupBuffer)
{
    ASSERT(soupBuffer);
    m_size = soupBuffer->length;
    m_segments.append({0, DataSegment::create(GUniquePtr<SoupBuffer>(soupBuffer))});
}

Ref<SharedBuffer> SharedBuffer::wrapSoupBuffer(SoupBuffer* soupBuffer)
{
    return adoptRef(*new SharedBuffer(soupBuffer));
}

// Creates a SoupBuffer viewing [offset, offset + size) of this buffer;
// size == 0 means "the whole buffer". The SharedBuffer is ref'd here and
// deref'd in the SoupBuffer's destroy callback, so the underlying bytes stay
// alive for the lifetime of the returned SoupBuffer.
GUniquePtr<SoupBuffer> SharedBuffer::createSoupBuffer(unsigned offset, unsigned size)
{
    ref();
    GUniquePtr<SoupBuffer> buffer(soup_buffer_new_with_owner(data() + offset, size ? size : this->size(), this, [](void* data) {
        static_cast<SharedBuffer*>(data)->deref();
    }));
    return buffer;
}

} // namespace WebCore
#endif
| {
"pile_set_name": "Github"
} |
<?xml version="1.0" encoding="utf-8"?>
<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<ItemGroup>
<ClCompile Include="Debug.cpp">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="DriverEntry.cpp">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="DriverIo.cpp">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="IntelPt.cpp">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="KernelTracing.cpp">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="PtTests.cpp">
<Filter>Source Files</Filter>
</ClCompile>
<ClCompile Include="IntelPtXSave.cpp">
<Filter>Source Files</Filter>
</ClCompile>
</ItemGroup>
<ItemGroup>
<ClInclude Include="Debug.h">
<Filter>Header Files</Filter>
</ClInclude>
<ClInclude Include="DriverEntry.h">
<Filter>Header Files</Filter>
</ClInclude>
<ClInclude Include="DriverIo.h">
<Filter>Header Files</Filter>
</ClInclude>
<ClInclude Include="Intel_Defs.h">
<Filter>Header Files</Filter>
</ClInclude>
<ClInclude Include="IntelPt.h">
<Filter>Header Files</Filter>
</ClInclude>
<ClInclude Include="resource.h">
<Filter>Header Files</Filter>
</ClInclude>
<ClInclude Include="stdafx.h">
<Filter>Header Files</Filter>
</ClInclude>
<ClInclude Include="KernelTracing.h">
<Filter>Header Files</Filter>
</ClInclude>
<ClInclude Include="UndocNt.h">
<Filter>Header Files</Filter>
</ClInclude>
<ClInclude Include="IntelPtXSave.h">
<Filter>Header Files</Filter>
</ClInclude>
</ItemGroup>
<ItemGroup>
<Filter Include="Source Files">
<UniqueIdentifier>{0a59f678-349f-4b75-b196-ac6e6f051185}</UniqueIdentifier>
</Filter>
<Filter Include="Header Files">
<UniqueIdentifier>{cc981622-83ff-4000-be86-2e79af42be21}</UniqueIdentifier>
</Filter>
<Filter Include="Resources">
<UniqueIdentifier>{4b959927-b2e1-4594-8fff-e51a0d8bfc8a}</UniqueIdentifier>
</Filter>
<Filter Include="Assembly Files">
<UniqueIdentifier>{0d7a4089-f791-4f8d-b924-3adb4c817f29}</UniqueIdentifier>
</Filter>
</ItemGroup>
<ItemGroup>
<None Include="readme.md" />
<None Include="Export.def" />
</ItemGroup>
<ItemGroup>
<ResourceCompile Include="WindowsIntelPtDriver.rc">
<Filter>Resources</Filter>
</ResourceCompile>
</ItemGroup>
<ItemGroup>
<MASM Include="asm\Amd64XSave.asm">
<Filter>Assembly Files</Filter>
</MASM>
</ItemGroup>
</Project> | {
"pile_set_name": "Github"
} |
//
// CalendarHomeViewController.m
// Calendar
//
// Created by 张凡 on 14-6-23.
// Copyright (c) 2014年 张凡. All rights reserved.
//
// 版权属于原作者
// http://code4app.com (cn) http://code4app.net (en)
// 发布代码于最专业的源码分享网站: Code4App.com
#import "CalendarHomeViewController.h"
#import "Color.h"
@interface CalendarHomeViewController ()
{
int daynumber;// number of days covered by the calendar
int optiondaynumber;// how many dates the user may select before the result is returned
// NSMutableArray *optiondayarray;// array holding the selected date objects
}
@end
@implementation CalendarHomeViewController
- (id)initWithNibName:(NSString *)nibNameOrNil bundle:(NSBundle *)nibBundleOrNil
{
self = [super initWithNibName:nibNameOrNil bundle:nibBundleOrNil];
if (self) {
// Custom initialization
}
return self;
}
- (void)viewDidLoad
{
[super viewDidLoad];
// Do any additional setup after loading the view.
}
- (void)didReceiveMemoryWarning
{
[super didReceiveMemoryWarning];
// Dispose of any resources that can be recreated.
}
#pragma mark - 设置方法
// -- Setup methods (pragma label above: "setup methods") --
// Airplane (flight) booking setup: build the calendar for `day` days,
// optionally pre-selecting the date given by `todate`.
- (void)setAirPlaneToDay:(int)day ToDateforString:(NSString *)todate
{
daynumber = day;
optiondaynumber = 500;// original note said "return data after selecting one" -- NOTE(review): the limit here is 500, not 1; confirm intent
super.calendarMonth = [self getMonthArrayOfDayNumber:daynumber ToDateforString:todate];
[super.collectionView reloadData];// refresh the calendar grid
}
// Hotel booking setup: two dates (e.g. check-in/check-out) must be
// selected before the data object is returned.
- (void)setHotelToDay:(int)day ToDateforString:(NSString *)todate
{
daynumber = day;
optiondaynumber = 2;// return the data object after two dates are selected
super.calendarMonth = [self getMonthArrayOfDayNumber:daynumber ToDateforString:todate];
[super.collectionView reloadData];// refresh the calendar grid
}
// Train booking setup: a single date selection returns the data object.
- (void)setTrainToDay:(int)day ToDateforString:(NSString *)todate
{
daynumber = day;
optiondaynumber = 1;// return the data object after one date is selected
super.calendarMonth = [self getMonthArrayOfDayNumber:daynumber ToDateforString:todate];
[super.collectionView reloadData];// refresh the calendar grid
}
#pragma mark - 逻辑代码初始化
// -- Logic initialization (pragma label above) --
// Builds the array of month models covering `day` days starting today,
// using `todate` (if non-nil) as the initially selected date.
- (NSMutableArray *)getMonthArrayOfDayNumber:(int)day ToDateforString:(NSString *)todate
{
NSDate *date = [NSDate date];
NSDate *selectdate = [NSDate date];
if (todate) {
selectdate = [selectdate dateFromString:todate];
}
super.Logic = [[CalendarLogic alloc]init];
return [super.Logic reloadCalendarView:date selectDate:selectdate needDays:day];
}
#pragma mark - 设置标题
// -- Title setup (pragma label above) --
// Sets the navigation bar title and its font/color attributes.
- (void)setCalendartitle:(NSString *)calendartitle
{
[self.navigationItem setTitle:calendartitle];
[self.navigationController.navigationBar setTitleTextAttributes:
@{NSFontAttributeName:[UIFont systemFontOfSize:19],
NSForegroundColorAttributeName:[UIColor blackColor]}];
}
@end
| {
"pile_set_name": "Github"
} |
///////////////////////////////////////////////////////////////////////////////
/// \file tags.hpp
/// Contains the tags for all the overloadable operators in C++
//
// Copyright 2008 Eric Niebler. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
#ifndef BOOST_PROTO_TAGS_HPP_EAN_04_01_2005
#define BOOST_PROTO_TAGS_HPP_EAN_04_01_2005
#include <boost/proto/proto_fwd.hpp>
namespace boost { namespace proto { namespace tagns_ { namespace tag
{
/// Each tag below is an empty struct: a pure compile-time marker that
/// records which C++ operator an expression-tree node represents.
/// Tag type for terminals; aka, leaves in the expression tree.
struct terminal {};
/// Tag type for the unary + operator.
struct unary_plus {};
/// Tag type for the unary - operator.
struct negate {};
/// Tag type for the unary * operator.
struct dereference {};
/// Tag type for the unary ~ operator.
struct complement {};
/// Tag type for the unary & operator.
struct address_of {};
/// Tag type for the unary ! operator.
struct logical_not {};
/// Tag type for the unary prefix ++ operator.
struct pre_inc {};
/// Tag type for the unary prefix -- operator.
struct pre_dec {};
/// Tag type for the unary postfix ++ operator.
struct post_inc {};
/// Tag type for the unary postfix -- operator.
struct post_dec {};
/// Tag type for the binary \<\< operator.
struct shift_left {};
/// Tag type for the binary \>\> operator.
struct shift_right {};
/// Tag type for the binary * operator.
struct multiplies {};
/// Tag type for the binary / operator.
struct divides {};
/// Tag type for the binary % operator.
struct modulus {};
/// Tag type for the binary + operator.
struct plus {};
/// Tag type for the binary - operator.
struct minus {};
/// Tag type for the binary \< operator.
struct less {};
/// Tag type for the binary \> operator.
struct greater {};
/// Tag type for the binary \<= operator.
struct less_equal {};
/// Tag type for the binary \>= operator.
struct greater_equal {};
/// Tag type for the binary == operator.
struct equal_to {};
/// Tag type for the binary != operator.
struct not_equal_to {};
/// Tag type for the binary || operator.
struct logical_or {};
/// Tag type for the binary && operator.
struct logical_and {};
/// Tag type for the binary & operator.
struct bitwise_and {};
/// Tag type for the binary | operator.
struct bitwise_or {};
/// Tag type for the binary ^ operator.
struct bitwise_xor {};
/// Tag type for the binary , operator.
struct comma {};
/// Tag type for the binary ->* operator.
struct mem_ptr {};
/// Tag type for the binary = operator.
struct assign {};
/// Tag type for the binary \<\<= operator.
struct shift_left_assign {};
/// Tag type for the binary \>\>= operator.
struct shift_right_assign {};
/// Tag type for the binary *= operator.
struct multiplies_assign {};
/// Tag type for the binary /= operator.
struct divides_assign {};
/// Tag type for the binary %= operator.
struct modulus_assign {};
/// Tag type for the binary += operator.
struct plus_assign {};
/// Tag type for the binary -= operator.
struct minus_assign {};
/// Tag type for the binary &= operator.
struct bitwise_and_assign {};
/// Tag type for the binary |= operator.
struct bitwise_or_assign {};
/// Tag type for the binary ^= operator.
struct bitwise_xor_assign {};
/// Tag type for the binary subscript ([]) operator.
struct subscript {};
/// Tag type for virtual data members (Proto's member-access extension).
struct member {};
/// Tag type for the ternary ?: conditional operator.
struct if_else_ {};
/// Tag type for the n-ary function call operator.
struct function {};
}}}}
#endif
| {
"pile_set_name": "Github"
} |
from . import decompiler, disassembler
| {
"pile_set_name": "Github"
} |
/**********************************************************************
*These solidity codes have been obtained from Etherscan for extracting
*the smartcontract related info.
*The data will be used by MATRIX AI team as the reference basis for
*MATRIX model analysis,extraction of contract semantics,
*as well as AI based data analysis, etc.
**********************************************************************/
pragma solidity ^0.4.2;
// Simple on-chain certificate registry: a single authority (the deployer)
// stores 32-byte certificate hashes keyed by small integer indices.
contract Sign {
address public AddAuthority;// the only account allowed to write/destroy (set at deployment)
mapping (uint32 => bytes32) Cert;// index => certificate hash
// =============================================
// Constructor (pre-0.4.22 style: a function named after the contract).
// Seeds indices 0 and 1 with initial certificate hashes.
function Sign() {
AddAuthority = msg.sender;
Cert [0] = 0x7a1d671e46f713a33286d4b4215796c8d396fd0e7cedf0b4e01d071df0f1412a;
Cert [1] = 0x5705f82396973f8f3861f1c29d7962e3234ff732723e39689ca7e7c030580000;
}
function () {throw;} // fallback: reject plain transfers / unknown calls (original comment: "reverse", i.e. revert)
function destroy() {if (msg.sender == AddAuthority) {selfdestruct(AddAuthority);}}
// Stores a certificate hash; silently a no-op for non-authority callers.
function SetCert (uint32 _IndiceIndex, bytes32 _Cert) {
if (msg.sender == AddAuthority) {
Cert [_IndiceIndex] = _Cert;
}
}
// Returns the certificate hash at the given index (zero bytes if unset).
// NOTE(review): not marked `constant`, so an external call is a gas-costing
// transaction rather than a free read -- confirm whether this was intended.
function GetCert (uint32 _IndiceIndex) returns (bytes32 _Valeur) {
_Valeur = Cert [_IndiceIndex];
return _Valeur;
}
}
"pile_set_name": "Github"
} |
/* Copyright 2010-present MongoDB Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
using System;
namespace MongoDB.Bson.Serialization.IdGenerators
{
/// <summary>
/// Represents an Id generator for Guids.
/// </summary>
public class GuidGenerator : IIdGenerator
{
    // Shared singleton, created eagerly at type initialization.
    private static readonly GuidGenerator s_instance = new GuidGenerator();

    /// <summary>
    /// Initializes a new instance of the GuidGenerator class.
    /// </summary>
    public GuidGenerator()
    {
    }

    /// <summary>
    /// Gets the shared GuidGenerator instance.
    /// </summary>
    public static GuidGenerator Instance
    {
        get { return s_instance; }
    }

    /// <summary>
    /// Generates a new random Guid to serve as a document Id.
    /// </summary>
    /// <param name="container">The container of the document (will be a MongoCollection when called from the C# driver).</param>
    /// <param name="document">The document.</param>
    /// <returns>A freshly generated Guid.</returns>
    public object GenerateId(object container, object document)
    {
        return Guid.NewGuid();
    }

    /// <summary>
    /// Tests whether an Id is empty.
    /// </summary>
    /// <param name="id">The Id.</param>
    /// <returns>True when the Id is null or equal to Guid.Empty.</returns>
    public bool IsEmpty(object id)
    {
        if (id == null)
        {
            return true;
        }
        return Guid.Empty.Equals((Guid)id);
    }
}
}
| {
"pile_set_name": "Github"
} |
# Docker Travis container notes
This method requires Docker installed on your local machine. This also assumes that you start at the top of the Libint source tree.
1. Create a Travis-CI 'Bionic' docker image: `cd bin; ./docker-travis-build.sh`
2. Run a shell in a container using the newly created image: `docker run -it libint-travis-debug bash -l`; if you want to run a debugger, add the `--privileged` flag, as in: `docker run --privileged -it libint-travis-debug bash -l`
3. `cd /home/travis/_build`
4. Configure the job to use the appropriate compiler, compiler version, and debug/release build type:
* `export BUILD_TYPE=B`, where `B` is `Debug` or `Release`
* If want to use GNU C++ compiler (gcc):
* `export GCC_VERSION=VVV` where `VVV` should be the GCC version to be used. The currently valid values are `6`, `7`, `8` and `9`.
* `export CXX=g++`
* If want to use Clang C++ compiler (clang++):
* `export GCC_VERSION=8`
* `export CLANG_VERSION=VVV` where `VVV` should be the Clang version to be used. The currently valid values are `8` and `9`.
* `export CXX=clang++`
* `apt-get update && apt-get install libc++-${CLANG_VERSION}-dev libc++abi-${CLANG_VERSION}-dev`
5. Build and run tests: `./build.sh`
| {
"pile_set_name": "Github"
} |
name = "StackCollections"
uuid = "501fa84d-4592-420e-a352-d1184606d913"
repo = "https://github.com/jakobnissen/StackCollections.jl.git"
| {
"pile_set_name": "Github"
} |
/**
* Refreshcontrol Component Demo for SaltUI
* @author xiaohe.wp
*
* Copyright 2014-2017, SaltUI Team.
* All rights reserved.
*/
import 'salt-context';
import './RefreshcontrolDemo.styl';
import Demo from './RefreshcontrolDemo';
// window.FastClick && FastClick.attach(document.body);
// 渲染demo
export default Demo;
| {
"pile_set_name": "Github"
} |
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build gccgo,linux,amd64
package unix
import "syscall"
// realGettimeofday is bound by gccgo's directive below directly to the C
// library's gettimeofday(2); the second argument is the unused timezone.
//extern gettimeofday
func realGettimeofday(*Timeval, *byte) int32

// gettimeofday fills tv with the current time of day.  It returns 0 on
// success, or the C errno value when the underlying call fails.
func gettimeofday(tv *Timeval) (err syscall.Errno) {
r := realGettimeofday(tv, nil)
if r < 0 {
// Negative return: fetch errno set by the C call.
return syscall.GetErrno()
}
return 0
}
| {
"pile_set_name": "Github"
} |
<!-- Instructions: https://github.com/PolymerElements/iron-meta/CONTRIBUTING.md#filing-issues -->
### Description
<!-- Example: The `paper-foo` element causes the page to turn pink when clicked. -->
### Expected outcome
<!-- Example: The page stays the same color. -->
### Actual outcome
<!-- Example: The page turns pink. -->
### Live Demo
<!-- Example: https://jsbin.com/cagaye/edit?html,output -->
### Steps to reproduce
<!-- Example
1. Put a `paper-foo` element in the page.
2. Open the page in a web browser.
3. Click the `paper-foo` element.
-->
### Browsers Affected
<!-- Check all that apply -->
- [ ] Chrome
- [ ] Firefox
- [ ] Safari 9
- [ ] Safari 8
- [ ] Safari 7
- [ ] Edge
- [ ] IE 11
- [ ] IE 10
| {
"pile_set_name": "Github"
} |
<!-- This comment will put IE 6, 7 and 8 in quirks mode -->
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
<title>CICE: Data Fields</title>
<link href="tabs.css" rel="stylesheet" type="text/css"/>
<link href="search/search.css" rel="stylesheet" type="text/css"/>
<script type="text/javaScript" src="search/search.js"></script>
<link href="doxygen.css" rel="stylesheet" type="text/css"/>
</head>
<body onload='searchBox.OnSelectItem(0);'>
<!-- Generated by Doxygen 1.6.1 -->
<script type="text/javascript"><!--
var searchBox = new SearchBox("searchBox", "search",false,'Search');
--></script>
<div class="navigation" id="top">
<div class="tabs">
<ul>
<li><a href="index.html"><span>Main Page</span></a></li>
<li class="current"><a href="namespaces.html"><span>Modules</span></a></li>
<li><a href="annotated.html"><span>Data Types List</span></a></li>
<li><a href="files.html"><span>Files</span></a></li>
<li>
<div id="MSearchBox" class="MSearchBoxInactive">
<img id="MSearchSelect" src="search/search.png"
onmouseover="return searchBox.OnSearchSelectShow()"
onmouseout="return searchBox.OnSearchSelectHide()"
alt=""/>
<input type="text" id="MSearchField" value="Search" accesskey="S"
onfocus="searchBox.OnSearchFieldFocus(true)"
onblur="searchBox.OnSearchFieldFocus(false)"
onkeyup="searchBox.OnSearchFieldChange(event)"/>
<a id="MSearchClose" href="javascript:searchBox.CloseResultsWindow()"><img id="MSearchCloseImg" border="0" src="search/close.png" alt=""/></a>
</div>
</li>
</ul>
</div>
<div class="tabs">
<ul>
<li><a href="namespaces.html"><span>Modules List</span></a></li>
<li class="current"><a href="namespacemembers.html"><span>Module Members</span></a></li>
</ul>
</div>
<div class="tabs">
<ul>
<li class="current"><a href="namespacemembers.html"><span>All</span></a></li>
<li><a href="namespacemembers_func.html"><span>Functions/Subroutines</span></a></li>
<li><a href="namespacemembers_vars.html"><span>Variables</span></a></li>
</ul>
</div>
<div class="tabs">
<ul>
<li><a href="namespacemembers.html#index_a"><span>a</span></a></li>
<li><a href="namespacemembers_0x62.html#index_b"><span>b</span></a></li>
<li><a href="namespacemembers_0x63.html#index_c"><span>c</span></a></li>
<li><a href="namespacemembers_0x64.html#index_d"><span>d</span></a></li>
<li><a href="namespacemembers_0x65.html#index_e"><span>e</span></a></li>
<li><a href="namespacemembers_0x66.html#index_f"><span>f</span></a></li>
<li><a href="namespacemembers_0x67.html#index_g"><span>g</span></a></li>
<li><a href="namespacemembers_0x68.html#index_h"><span>h</span></a></li>
<li><a href="namespacemembers_0x69.html#index_i"><span>i</span></a></li>
<li class="current"><a href="namespacemembers_0x6a.html#index_j"><span>j</span></a></li>
<li><a href="namespacemembers_0x6b.html#index_k"><span>k</span></a></li>
<li><a href="namespacemembers_0x6c.html#index_l"><span>l</span></a></li>
<li><a href="namespacemembers_0x6d.html#index_m"><span>m</span></a></li>
<li><a href="namespacemembers_0x6e.html#index_n"><span>n</span></a></li>
<li><a href="namespacemembers_0x6f.html#index_o"><span>o</span></a></li>
<li><a href="namespacemembers_0x70.html#index_p"><span>p</span></a></li>
<li><a href="namespacemembers_0x71.html#index_q"><span>q</span></a></li>
<li><a href="namespacemembers_0x72.html#index_r"><span>r</span></a></li>
<li><a href="namespacemembers_0x73.html#index_s"><span>s</span></a></li>
<li><a href="namespacemembers_0x74.html#index_t"><span>t</span></a></li>
<li><a href="namespacemembers_0x75.html#index_u"><span>u</span></a></li>
<li><a href="namespacemembers_0x76.html#index_v"><span>v</span></a></li>
<li><a href="namespacemembers_0x77.html#index_w"><span>w</span></a></li>
<li><a href="namespacemembers_0x78.html#index_x"><span>x</span></a></li>
<li><a href="namespacemembers_0x79.html#index_y"><span>y</span></a></li>
<li><a href="namespacemembers_0x7a.html#index_z"><span>z</span></a></li>
</ul>
</div>
</div>
<div class="contents">
Here is a list of all module members with links to the module documentation for each member:
<h3><a class="anchor" id="index_j">- j -</a></h3><ul>
<li>j_global
: <a class="el" href="namespaceice__blocks.html#aabcbed07b689eed9c35b22f738006217">ice_blocks</a>
</li>
<li>jindx
: <a class="el" href="namespaceice__diagnostics.html#ae14da62eafd352886088d4688a3a2129">ice_diagnostics</a>
</li>
<li>jp
: <a class="el" href="namespaceice__diagnostics.html#a10a3f8830dada38c8bc78c01adf3e435">ice_diagnostics</a>
</li>
<li>jtest
: <a class="el" href="namespaceice__constants.html#ac7e42098d3809e659df8da6e3fe54368">ice_constants</a>
</li>
</ul>
</div>
<!--- window showing the filter options -->
<div id="MSearchSelectWindow"
onmouseover="return searchBox.OnSearchSelectShow()"
onmouseout="return searchBox.OnSearchSelectHide()"
onkeydown="return searchBox.OnSearchSelectKey(event)">
<a class="SelectItem" href="javascript:void(0)" onclick="searchBox.OnSelectItem(0)"><span class="SelectionMark"> </span>All</a><a class="SelectItem" href="javascript:void(0)" onclick="searchBox.OnSelectItem(1)"><span class="SelectionMark"> </span>Classes</a><a class="SelectItem" href="javascript:void(0)" onclick="searchBox.OnSelectItem(2)"><span class="SelectionMark"> </span>Namespaces</a><a class="SelectItem" href="javascript:void(0)" onclick="searchBox.OnSelectItem(3)"><span class="SelectionMark"> </span>Files</a><a class="SelectItem" href="javascript:void(0)" onclick="searchBox.OnSelectItem(4)"><span class="SelectionMark"> </span>Functions</a><a class="SelectItem" href="javascript:void(0)" onclick="searchBox.OnSelectItem(5)"><span class="SelectionMark"> </span>Variables</a><a class="SelectItem" href="javascript:void(0)" onclick="searchBox.OnSelectItem(6)"><span class="SelectionMark"> </span>Defines</a></div>
<!-- iframe showing the search results (closed by default) -->
<div id="MSearchResultsWindow">
<iframe src="" frameborder="0"
name="MSearchResults" id="MSearchResults">
</iframe>
</div>
<hr size="1"/><address style="text-align: right;"><small>Generated on Tue Oct 6 14:02:26 2009 for CICE by
<a href="http://www.doxygen.org/index.html">
<img class="footer" src="doxygen.png" alt="doxygen"/></a> 1.6.1 </small></address>
</body>
</html>
| {
"pile_set_name": "Github"
} |
13
charge = 0
O 1.207554 0.132236 0.556619
C 2.247140 0.097249 -0.046143
C 2.952489 1.266892 -0.577671
C 4.107501 1.240433 -1.248657
C 4.486444 2.640594 -1.612245
C 3.313789 3.441814 -1.014188
O 2.423634 2.494258 -0.394809
H 2.772864 -0.857537 -0.255414
H 4.670490 0.349968 -1.485552
H 4.572859 2.773920 -2.695229
H 5.450777 2.928756 -1.181863
H 2.745277 3.984897 -1.771464
H 3.633983 4.147540 -0.245482 | {
"pile_set_name": "Github"
} |
<cruisecontrol>
<project name="FinalBuilder Example" webURL="http://localhost/ccnet">
<sourcecontrol type="nullSourceControl" />
<tasks>
<!-- Simple FinalBuilder Block: -->
<FinalBuilder>
<ProjectFile>C:\source\BuildProject\Build Process.fbz4</ProjectFile>
</FinalBuilder>
<!-- FinalBuilder Block with all options: -->
<FinalBuilder>
<ProjectFile>C:\source\BuildProject\Build Process.fbz5</ProjectFile>
<FBVersion>5</FBVersion> <!-- (Optional) Use FinalBuilder 5 -->
<ShowBanner>false</ShowBanner> <!-- (Optional) Don't output header information -->
<FBVariables>
<!-- Set the variable 'IsContinuousIntegrationBuild' in the FinalBuilder project -->
<FBVariable name="IsContinuousIntegrationBuild" value="True" />
</FBVariables>
<Timeout>3600</Timeout> <!-- (Optional) Time the build out if it doesn't finish after an hour -->
<DontWriteToLog>true</DontWriteToLog> <!-- (Optional) Don't write anything to the FinalBuilder project log -->
</FinalBuilder>
</tasks>
</project>
</cruisecontrol> | {
"pile_set_name": "Github"
} |
#ifndef VERSION_H
#define VERSION_H
/* Version string components.  They are empty placeholders here and are
 * presumably filled in/generated by the build system -- TODO confirm. */
#define VERSION_MAJOR ""
#define VERSION_MINOR ""
#define VERSION_BUILD ""
#endif // VERSION_H
| {
"pile_set_name": "Github"
} |
#include <iostream>
using namespace std;
// LeetCode 205 "Isomorphic Strings": s and t are isomorphic when a
// one-to-one character mapping turns s into t.
class Solution {
public:
    // Returns true when s and t are isomorphic.
    // O(n) time, O(1) extra space -- replaces the original's repeated
    // string::find calls (O(n^2)) with per-character last-seen tables,
    // and rejects strings of different length up front.
    bool isIsomorphic(string s, string t) {
        if (s.size() != t.size())
            return false;
        // last index (plus one, so 0 means "never seen") at which each
        // byte occurred in its own string; isomorphic strings agree at
        // every position.
        int lastS[256] = {0};
        int lastT[256] = {0};
        for (size_t i = 0; i < s.size(); ++i) {
            unsigned char cs = static_cast<unsigned char>(s[i]);
            unsigned char ct = static_cast<unsigned char>(t[i]);
            if (lastS[cs] != lastT[ct])
                return false;
            lastS[cs] = lastT[ct] = static_cast<int>(i) + 1;
        }
        return true;
    }
};
// Runs isIsomorphic on (s, t) and prints whether the result matches the
// expected value.
void test(string test_name, string s, string t, bool expected)
{
    bool actual = Solution().isIsomorphic(s, t);
    const char *verdict = (actual == expected) ? " success." : " failed.";
    cout << test_name << verdict << endl;
}
// Exercises isIsomorphic on the three sample cases from the problem
// statement.
int main()
{
    test("test1", "egg", "add", true);
    test("test2", "foo", "bar", false);
    test("test3", "paper", "title", true);
    return 0;
}
| {
"pile_set_name": "Github"
} |
/* TomsFastMath, a fast ISO C bignum library.
*
* This project is meant to fill in where LibTomMath
* falls short. That is speed ;-)
*
* This project is public domain and free for all purposes.
*
* Tom St Denis, [email protected]
*/
#include "bignum_fast.h"
/* lnz[v] = number of trailing zero bits in the 4-bit value v;
 * lnz[0] is 4, meaning "all four bits are zero". */
static const int lnz[16] = {
4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0
};
/* Counts the number of least-significant zero bits of a, i.e. the bit
 * index of its lowest set bit.  (The original comment said "before the
 * first zero bit"; the code scans up to the first ONE bit.)  Returns 0
 * for a == 0 by convention. */
int fp_cnt_lsb(fp_int *a)
{
int x;
fp_digit q, qq;
/* easy out */
if (fp_iszero(a) == 1) {
return 0;
}
/* scan lower digits until non-zero; each skipped digit is DIGIT_BIT zeros */
for (x = 0; x < a->used && a->dp[x] == 0; x++);
q = a->dp[x];
x *= DIGIT_BIT;
/* now scan this digit until a 1 is found, one nibble at a time via lnz */
if ((q & 1) == 0) {
do {
qq = q & 15;
x += lnz[qq];
q >>= 4;
} while (qq == 0);
}
return x;
}
/* $Source: /cvs/libtom/tomsfastmath/src/bit/fp_cnt_lsb.c,v $ */
/* $Revision: 1.1 $ */
/* $Date: 2006/12/31 21:25:53 $ */
| {
"pile_set_name": "Github"
} |
/*!
@page input_guide Input guide
@tableofcontents
This guide introduces the input related functions of GLFW. For details on
a specific function in this category, see the @ref input. There are also guides
for the other areas of GLFW.
- @ref intro_guide
- @ref window_guide
- @ref context_guide
- @ref vulkan_guide
- @ref monitor_guide
GLFW provides many kinds of input. While some can only be polled, like time, or
only received via callbacks, like scrolling, many provide both callbacks and
polling. Callbacks are more work to use than polling but are less CPU intensive
and guarantee that you do not miss state changes.
All input callbacks receive a window handle. By using the
[window user pointer](@ref window_userptr), you can access non-global structures
or objects from your callbacks.
To get a better feel for how the various events callbacks behave, run the
`events` test program. It register every callback supported by GLFW and prints
out all arguments provided for every event, along with time and sequence
information.
@section events Event processing
GLFW needs to poll the window system for events both to provide input to the
application and to prove to the window system that the application hasn't locked
up. Event processing is normally done each frame after
[buffer swapping](@ref buffer_swap). Even when you have no windows, event
polling needs to be done in order to receive monitor and joystick connection
events.
There are three functions for processing pending events. @ref glfwPollEvents,
processes only those events that have already been received and then returns
immediately.
@code
glfwPollEvents();
@endcode
This is the best choice when rendering continuously, like most games do.
If you only need to update the contents of the window when you receive new
input, @ref glfwWaitEvents is a better choice.
@code
glfwWaitEvents();
@endcode
It puts the thread to sleep until at least one event has been received and then
processes all received events. This saves a great deal of CPU cycles and is
useful for, for example, editing tools.
If you want to wait for events but have UI elements or other tasks that need
periodic updates, @ref glfwWaitEventsTimeout lets you specify a timeout.
@code
glfwWaitEventsTimeout(0.7);
@endcode
It puts the thread to sleep until at least one event has been received, or until
the specified number of seconds have elapsed. It then processes any received
events.
If the main thread is sleeping in @ref glfwWaitEvents, you can wake it from
another thread by posting an empty event to the event queue with @ref
glfwPostEmptyEvent.
@code
glfwPostEmptyEvent();
@endcode
Do not assume that callbacks will _only_ be called in response to the above
functions. While it is necessary to process events in one or more of the ways
above, window systems that require GLFW to register callbacks of its own can
pass events to GLFW in response to many window system function calls. GLFW will
pass those events on to the application callbacks before returning.
For example, on Windows the system function that @ref glfwSetWindowSize is
implemented with will send window size events directly to the event callback
that every window has and that GLFW implements for its windows. If you have set
a [window size callback](@ref window_size) GLFW will call it in turn with the
new size before everything returns back out of the @ref glfwSetWindowSize call.
@section input_keyboard Keyboard input
GLFW divides keyboard input into two categories; key events and character
events. Key events relate to actual physical keyboard keys, whereas character
events relate to the Unicode code points generated by pressing some of them.
Keys and characters do not map 1:1. A single key press may produce several
characters, and a single character may require several keys to produce. This
may not be the case on your machine, but your users are likely not all using the
same keyboard layout, input method or even operating system as you.
@subsection input_key Key input
If you wish to be notified when a physical key is pressed or released or when it
repeats, set a key callback.
@code
glfwSetKeyCallback(window, key_callback);
@endcode
The callback function receives the [keyboard key](@ref keys), platform-specific
scancode, key action and [modifier bits](@ref mods).
@code
void key_callback(GLFWwindow* window, int key, int scancode, int action, int mods)
{
if (key == GLFW_KEY_E && action == GLFW_PRESS)
activate_airship();
}
@endcode
The action is one of `GLFW_PRESS`, `GLFW_REPEAT` or `GLFW_RELEASE`. The key
will be `GLFW_KEY_UNKNOWN` if GLFW lacks a key token for it, for example
_E-mail_ and _Play_ keys.
The scancode is unique for every key, regardless of whether it has a key token.
Scancodes are platform-specific but consistent over time, so keys will have
different scancodes depending on the platform but they are safe to save to disk.
You can query the scancode for any [named key](@ref keys) on the current
platform with @ref glfwGetKeyScancode.
@code
const int scancode = glfwGetKeyScancode(GLFW_KEY_X);
set_key_mapping(scancode, swap_weapons);
@endcode
The last reported state for every [named key](@ref keys) is also saved in
per-window state arrays that can be polled with @ref glfwGetKey.
@code
int state = glfwGetKey(window, GLFW_KEY_E);
if (state == GLFW_PRESS)
{
activate_airship();
}
@endcode
The returned state is one of `GLFW_PRESS` or `GLFW_RELEASE`.
This function only returns cached key event state. It does not poll the
system for the current physical state of the key.
@anchor GLFW_STICKY_KEYS
Whenever you poll state, you risk missing the state change you are looking for.
If a pressed key is released again before you poll its state, you will have
missed the key press. The recommended solution for this is to use a
key callback, but there is also the `GLFW_STICKY_KEYS` input mode.
@code
glfwSetInputMode(window, GLFW_STICKY_KEYS, GLFW_TRUE);
@endcode
When sticky keys mode is enabled, the pollable state of a key will remain
`GLFW_PRESS` until the state of that key is polled with @ref glfwGetKey. Once
it has been polled, if a key release event had been processed in the meantime,
the state will reset to `GLFW_RELEASE`, otherwise it will remain `GLFW_PRESS`.
@anchor GLFW_LOCK_KEY_MODS
If you wish to know what the state of the Caps Lock and Num Lock keys was when
input events were generated, set the `GLFW_LOCK_KEY_MODS` input mode.
@code
glfwSetInputMode(window, GLFW_LOCK_KEY_MODS, GLFW_TRUE);
@endcode
When this input mode is enabled, any callback that receives
[modifier bits](@ref mods) will have the @ref GLFW_MOD_CAPS_LOCK bit set if Caps
Lock was on when the event occurred and the @ref GLFW_MOD_NUM_LOCK bit set if
Num Lock was on.
The `GLFW_KEY_LAST` constant holds the highest value of any
[named key](@ref keys).
@subsection input_char Text input
GLFW supports text input in the form of a stream of
[Unicode code points](https://en.wikipedia.org/wiki/Unicode), as produced by the
operating system text input system. Unlike key input, text input obeys keyboard
layouts and modifier keys and supports composing characters using
[dead keys](https://en.wikipedia.org/wiki/Dead_key). Once received, you can
encode the code points into UTF-8 or any other encoding you prefer.
Because an `unsigned int` is 32 bits long on all platforms supported by GLFW,
you can treat the code point argument as native endian UTF-32.
If you wish to offer regular text input, set a character callback.
@code
glfwSetCharCallback(window, character_callback);
@endcode
The callback function receives Unicode code points for key events that would
have led to regular text input and generally behaves as a standard text field on
that platform.
@code
void character_callback(GLFWwindow* window, unsigned int codepoint)
{
}
@endcode
@subsection input_key_name Key names
If you wish to refer to keys by name, you can query the keyboard layout
dependent name of printable keys with @ref glfwGetKeyName.
@code
const char* key_name = glfwGetKeyName(GLFW_KEY_W, 0);
show_tutorial_hint("Press %s to move forward", key_name);
@endcode
This function can handle both [keys and scancodes](@ref input_key). If the
specified key is `GLFW_KEY_UNKNOWN` then the scancode is used, otherwise it is
ignored. This matches the behavior of the key callback, meaning the callback
arguments can always be passed unmodified to this function.
@section input_mouse Mouse input
Mouse input comes in many forms, including mouse motion, button presses and
scrolling offsets. The cursor appearance can also be changed, either to
a custom image or a standard cursor shape from the system theme.
@subsection cursor_pos Cursor position
If you wish to be notified when the cursor moves over the window, set a cursor
position callback.
@code
glfwSetCursorPosCallback(window, cursor_position_callback);
@endcode
The callback functions receives the cursor position, measured in screen
coordinates but relative to the top-left corner of the window content area. On
platforms that provide it, the full sub-pixel cursor position is passed on.
@code
static void cursor_position_callback(GLFWwindow* window, double xpos, double ypos)
{
}
@endcode
The cursor position is also saved per-window and can be polled with @ref
glfwGetCursorPos.
@code
double xpos, ypos;
glfwGetCursorPos(window, &xpos, &ypos);
@endcode
@subsection cursor_mode Cursor mode
@anchor GLFW_CURSOR
The `GLFW_CURSOR` input mode provides several cursor modes for special forms of
mouse motion input. By default, the cursor mode is `GLFW_CURSOR_NORMAL`,
meaning the regular arrow cursor (or another cursor set with @ref glfwSetCursor)
is used and cursor motion is not limited.
If you wish to implement mouse motion based camera controls or other input
schemes that require unlimited mouse movement, set the cursor mode to
`GLFW_CURSOR_DISABLED`.
@code
glfwSetInputMode(window, GLFW_CURSOR, GLFW_CURSOR_DISABLED);
@endcode
This will hide the cursor and lock it to the specified window. GLFW will then
take care of all the details of cursor re-centering and offset calculation and
providing the application with a virtual cursor position. This virtual position
is provided normally via both the cursor position callback and through polling.
@note You should not implement your own version of this functionality using
other features of GLFW. It is not supported and will not work as robustly as
`GLFW_CURSOR_DISABLED`.
If you only wish the cursor to become hidden when it is over a window but still
want it to behave normally, set the cursor mode to `GLFW_CURSOR_HIDDEN`.
@code
glfwSetInputMode(window, GLFW_CURSOR, GLFW_CURSOR_HIDDEN);
@endcode
This mode puts no limit on the motion of the cursor.
To exit out of either of these special modes, restore the `GLFW_CURSOR_NORMAL`
cursor mode.
@code
glfwSetInputMode(window, GLFW_CURSOR, GLFW_CURSOR_NORMAL);
@endcode
@anchor GLFW_RAW_MOUSE_MOTION
@subsection raw_mouse_motion Raw mouse motion
When the cursor is disabled, raw (unscaled and unaccelerated) mouse motion can
be enabled if available.
Raw mouse motion is closer to the actual motion of the mouse across a surface.
It is not affected by the scaling and acceleration applied to the motion of the
desktop cursor. That processing is suitable for a cursor while raw motion is
better for controlling for example a 3D camera. Because of this, raw mouse
motion is only provided when the cursor is disabled.
Call @ref glfwRawMouseMotionSupported to check if the current machine provides
raw motion and set the `GLFW_RAW_MOUSE_MOTION` input mode to enable it. It is
disabled by default.
@code
if (glfwRawMouseMotionSupported())
glfwSetInputMode(window, GLFW_RAW_MOUSE_MOTION, GLFW_TRUE);
@endcode
If supported, raw mouse motion can be enabled or disabled per-window and at any
time but it will only be provided when the cursor is disabled.
@subsection cursor_object Cursor objects
GLFW supports creating both custom and system theme cursor images, encapsulated
as @ref GLFWcursor objects. They are created with @ref glfwCreateCursor or @ref
glfwCreateStandardCursor and destroyed with @ref glfwDestroyCursor, or @ref
glfwTerminate, if any remain.
@subsubsection cursor_custom Custom cursor creation
A custom cursor is created with @ref glfwCreateCursor, which returns a handle to
the created cursor object. For example, this creates a 16x16 white square
cursor with the hot-spot in the upper-left corner:
@code
unsigned char pixels[16 * 16 * 4];
memset(pixels, 0xff, sizeof(pixels));
GLFWimage image;
image.width = 16;
image.height = 16;
image.pixels = pixels;
GLFWcursor* cursor = glfwCreateCursor(&image, 0, 0);
@endcode
If cursor creation fails, `NULL` will be returned, so it is necessary to check
the return value.
The image data is 32-bit, little-endian, non-premultiplied RGBA, i.e. eight bits
per channel with the red channel first. The pixels are arranged canonically as
sequential rows, starting from the top-left corner.
@subsubsection cursor_standard Standard cursor creation
A cursor with a [standard shape](@ref shapes) from the current system cursor
theme can be created with @ref glfwCreateStandardCursor.
@code
GLFWcursor* cursor = glfwCreateStandardCursor(GLFW_HRESIZE_CURSOR);
@endcode
These cursor objects behave in the exact same way as those created with @ref
glfwCreateCursor except that the system cursor theme provides the actual image.
@subsubsection cursor_destruction Cursor destruction
When a cursor is no longer needed, destroy it with @ref glfwDestroyCursor.
@code
glfwDestroyCursor(cursor);
@endcode
Cursor destruction always succeeds. If the cursor is current for any window,
that window will revert to the default cursor. This does not affect the cursor
mode. All remaining cursors are destroyed when @ref glfwTerminate is called.
@subsubsection cursor_set Cursor setting
A cursor can be set as current for a window with @ref glfwSetCursor.
@code
glfwSetCursor(window, cursor);
@endcode
Once set, the cursor image will be used as long as the system cursor is over the
content area of the window and the [cursor mode](@ref cursor_mode) is set
to `GLFW_CURSOR_NORMAL`.
A single cursor may be set for any number of windows.
To revert to the default cursor, set the cursor of that window to `NULL`.
@code
glfwSetCursor(window, NULL);
@endcode
When a cursor is destroyed, any window that has it set will revert to the
default cursor. This does not affect the cursor mode.
@subsection cursor_enter Cursor enter/leave events
If you wish to be notified when the cursor enters or leaves the content area of
a window, set a cursor enter/leave callback.
@code
glfwSetCursorEnterCallback(window, cursor_enter_callback);
@endcode
The callback function receives the new classification of the cursor.
@code
void cursor_enter_callback(GLFWwindow* window, int entered)
{
if (entered)
{
// The cursor entered the content area of the window
}
else
{
// The cursor left the content area of the window
}
}
@endcode
You can query whether the cursor is currently inside the content area of the
window with the [GLFW_HOVERED](@ref GLFW_HOVERED_attrib) window attribute.
@code
if (glfwGetWindowAttrib(window, GLFW_HOVERED))
{
highlight_interface();
}
@endcode
@subsection input_mouse_button Mouse button input
If you wish to be notified when a mouse button is pressed or released, set
a mouse button callback.
@code
glfwSetMouseButtonCallback(window, mouse_button_callback);
@endcode
The callback function receives the [mouse button](@ref buttons), button action
and [modifier bits](@ref mods).
@code
void mouse_button_callback(GLFWwindow* window, int button, int action, int mods)
{
if (button == GLFW_MOUSE_BUTTON_RIGHT && action == GLFW_PRESS)
popup_menu();
}
@endcode
The action is one of `GLFW_PRESS` or `GLFW_RELEASE`.
Mouse button states for [named buttons](@ref buttons) are also saved in
per-window state arrays that can be polled with @ref glfwGetMouseButton.
@code
int state = glfwGetMouseButton(window, GLFW_MOUSE_BUTTON_LEFT);
if (state == GLFW_PRESS)
{
upgrade_cow();
}
@endcode
The returned state is one of `GLFW_PRESS` or `GLFW_RELEASE`.
This function only returns cached mouse button event state. It does not poll
the system for the current state of the mouse button.
@anchor GLFW_STICKY_MOUSE_BUTTONS
Whenever you poll state, you risk missing the state change you are looking for.
If a pressed mouse button is released again before you poll its state, you will have
missed the button press. The recommended solution for this is to use a
mouse button callback, but there is also the `GLFW_STICKY_MOUSE_BUTTONS`
input mode.
@code
glfwSetInputMode(window, GLFW_STICKY_MOUSE_BUTTONS, GLFW_TRUE);
@endcode
When sticky mouse buttons mode is enabled, the pollable state of a mouse button
will remain `GLFW_PRESS` until the state of that button is polled with @ref
glfwGetMouseButton. Once it has been polled, if a mouse button release event
had been processed in the meantime, the state will reset to `GLFW_RELEASE`,
otherwise it will remain `GLFW_PRESS`.
The `GLFW_MOUSE_BUTTON_LAST` constant holds the highest value of any
[named button](@ref buttons).
@subsection scrolling Scroll input
If you wish to be notified when the user scrolls, whether with a mouse wheel or
touchpad gesture, set a scroll callback.
@code
glfwSetScrollCallback(window, scroll_callback);
@endcode
The callback function receives two-dimensional scroll offsets.
@code
void scroll_callback(GLFWwindow* window, double xoffset, double yoffset)
{
}
@endcode
A normal mouse wheel, being vertical, provides offsets along the Y-axis.
@section joystick Joystick input
The joystick functions expose connected joysticks and controllers, with both
referred to as joysticks. It supports up to sixteen joysticks, ranging from
`GLFW_JOYSTICK_1`, `GLFW_JOYSTICK_2` up to and including `GLFW_JOYSTICK_16` or
`GLFW_JOYSTICK_LAST`. You can test whether a [joystick](@ref joysticks) is
present with @ref glfwJoystickPresent.
@code
int present = glfwJoystickPresent(GLFW_JOYSTICK_1);
@endcode
Each joystick has zero or more axes, zero or more buttons, zero or more hats,
a human-readable name, a user pointer and an SDL compatible GUID.
When GLFW is initialized, detected joysticks are added to the beginning of
the array. Once a joystick is detected, it keeps its assigned ID until it is
disconnected or the library is terminated, so as joysticks are connected and
disconnected, there may appear gaps in the IDs.
Joystick axis, button and hat state is updated when polled and does not require
a window to be created or events to be processed. However, if you want joystick
connection and disconnection events reliably delivered to the
[joystick callback](@ref joystick_event) then you must
[process events](@ref events).
To see all the properties of all connected joysticks in real-time, run the
`joysticks` test program.
@subsection joystick_axis Joystick axis states
The positions of all axes of a joystick are returned by @ref
glfwGetJoystickAxes. See the reference documentation for the lifetime of the
returned array.
@code
int count;
const float* axes = glfwGetJoystickAxes(GLFW_JOYSTICK_5, &count);
@endcode
Each element in the returned array is a value between -1.0 and 1.0.
@subsection joystick_button Joystick button states
The states of all buttons of a joystick are returned by @ref
glfwGetJoystickButtons. See the reference documentation for the lifetime of the
returned array.
@code
int count;
const unsigned char* buttons = glfwGetJoystickButtons(GLFW_JOYSTICK_3, &count);
@endcode
Each element in the returned array is either `GLFW_PRESS` or `GLFW_RELEASE`.
For backward compatibility with earlier versions that did not have @ref
glfwGetJoystickHats, the button array by default also includes all hats. See
the reference documentation for @ref glfwGetJoystickButtons for details.
@subsection joystick_hat Joystick hat states
The states of all hats are returned by @ref glfwGetJoystickHats. See the
reference documentation for the lifetime of the returned array.
@code
int count;
const unsigned char* hats = glfwGetJoystickHats(GLFW_JOYSTICK_7, &count);
@endcode
Each element in the returned array is one of the following:
Name | Value
---- | -----
`GLFW_HAT_CENTERED` | 0
`GLFW_HAT_UP` | 1
`GLFW_HAT_RIGHT` | 2
`GLFW_HAT_DOWN` | 4
`GLFW_HAT_LEFT` | 8
`GLFW_HAT_RIGHT_UP` | `GLFW_HAT_RIGHT` \| `GLFW_HAT_UP`
`GLFW_HAT_RIGHT_DOWN` | `GLFW_HAT_RIGHT` \| `GLFW_HAT_DOWN`
`GLFW_HAT_LEFT_UP` | `GLFW_HAT_LEFT` \| `GLFW_HAT_UP`
`GLFW_HAT_LEFT_DOWN` | `GLFW_HAT_LEFT` \| `GLFW_HAT_DOWN`
The diagonal directions are bitwise combinations of the primary (up, right, down
and left) directions and you can test for these individually by ANDing it with
the corresponding direction.
@code
if (hats[2] & GLFW_HAT_RIGHT)
{
// State of hat 2 could be right-up, right or right-down
}
@endcode
For backward compatibility with earlier versions that did not have @ref
glfwGetJoystickHats, all hats are by default also included in the button array.
See the reference documentation for @ref glfwGetJoystickButtons for details.
@subsection joystick_name Joystick name
The human-readable, UTF-8 encoded name of a joystick is returned by @ref
glfwGetJoystickName. See the reference documentation for the lifetime of the
returned string.
@code
const char* name = glfwGetJoystickName(GLFW_JOYSTICK_4);
@endcode
Joystick names are not guaranteed to be unique. Two joysticks of the same model
and make may have the same name. Only the [joystick token](@ref joysticks) is
guaranteed to be unique, and only until that joystick is disconnected.
@subsection joystick_userptr Joystick user pointer
Each joystick has a user pointer that can be set with @ref
glfwSetJoystickUserPointer and queried with @ref glfwGetJoystickUserPointer.
This can be used for any purpose you need and will not be modified by GLFW. The
value will be kept until the joystick is disconnected or until the library is
terminated.
The initial value of the pointer is `NULL`.
@subsection joystick_event Joystick configuration changes
If you wish to be notified when a joystick is connected or disconnected, set
a joystick callback.
@code
glfwSetJoystickCallback(joystick_callback);
@endcode
The callback function receives the ID of the joystick that has been connected
and disconnected and the event that occurred.
@code
void joystick_callback(int jid, int event)
{
if (event == GLFW_CONNECTED)
{
// The joystick was connected
}
else if (event == GLFW_DISCONNECTED)
{
// The joystick was disconnected
}
}
@endcode
For joystick connection and disconnection events to be delivered on all
platforms, you need to call one of the [event processing](@ref events)
functions. Joystick disconnection may also be detected and the callback
called by joystick functions. The function will then return whatever it
returns for a disconnected joystick.
Only @ref glfwGetJoystickName and @ref glfwGetJoystickUserPointer will return
useful values for a disconnected joystick and only before the joystick callback
returns.
@subsection gamepad Gamepad input
The joystick functions provide unlabeled axes, buttons and hats, with no
indication of where they are located on the device. Their order may also vary
between platforms even with the same device.
To solve this problem the SDL community crowdsourced the
[SDL_GameControllerDB](https://github.com/gabomdq/SDL_GameControllerDB) project,
a database of mappings from many different devices to an Xbox-like gamepad.
GLFW supports this mapping format and contains a copy of the mappings
available at the time of release. See @ref gamepad_mapping for how to update
this at runtime. Mappings will be assigned to joysticks automatically any time
a joystick is connected or the mappings are updated.
You can check whether a joystick is both present and has a gamepad mapping with
@ref glfwJoystickIsGamepad.
@code
if (glfwJoystickIsGamepad(GLFW_JOYSTICK_2))
{
// Use as gamepad
}
@endcode
If you are only interested in gamepad input you can use this function instead of
@ref glfwJoystickPresent.
You can query the human-readable name provided by the gamepad mapping with @ref
glfwGetGamepadName. This may or may not be the same as the
[joystick name](@ref joystick_name).
@code
const char* name = glfwGetGamepadName(GLFW_JOYSTICK_7);
@endcode
To retrieve the gamepad state of a joystick, call @ref glfwGetGamepadState.
@code
GLFWgamepadstate state;
if (glfwGetGamepadState(GLFW_JOYSTICK_3, &state))
{
if (state.buttons[GLFW_GAMEPAD_BUTTON_A])
{
input_jump();
}
input_speed(state.axes[GLFW_GAMEPAD_AXIS_RIGHT_TRIGGER]);
}
@endcode
The @ref GLFWgamepadstate struct has two arrays; one for button states and one
for axis states. The values for each button and axis are the same as for the
@ref glfwGetJoystickButtons and @ref glfwGetJoystickAxes functions, i.e.
`GLFW_PRESS` or `GLFW_RELEASE` for buttons and -1.0 to 1.0 inclusive for axes.
The sizes of the arrays and the positions within each array are fixed.
The [button indices](@ref gamepad_buttons) are `GLFW_GAMEPAD_BUTTON_A`,
`GLFW_GAMEPAD_BUTTON_B`, `GLFW_GAMEPAD_BUTTON_X`, `GLFW_GAMEPAD_BUTTON_Y`,
`GLFW_GAMEPAD_BUTTON_LEFT_BUMPER`, `GLFW_GAMEPAD_BUTTON_RIGHT_BUMPER`,
`GLFW_GAMEPAD_BUTTON_BACK`, `GLFW_GAMEPAD_BUTTON_START`,
`GLFW_GAMEPAD_BUTTON_GUIDE`, `GLFW_GAMEPAD_BUTTON_LEFT_THUMB`,
`GLFW_GAMEPAD_BUTTON_RIGHT_THUMB`, `GLFW_GAMEPAD_BUTTON_DPAD_UP`,
`GLFW_GAMEPAD_BUTTON_DPAD_RIGHT`, `GLFW_GAMEPAD_BUTTON_DPAD_DOWN` and
`GLFW_GAMEPAD_BUTTON_DPAD_LEFT`.
For those who prefer, there are also the `GLFW_GAMEPAD_BUTTON_CROSS`,
`GLFW_GAMEPAD_BUTTON_CIRCLE`, `GLFW_GAMEPAD_BUTTON_SQUARE` and
`GLFW_GAMEPAD_BUTTON_TRIANGLE` aliases for the A, B, X and Y button indices.
The [axis indices](@ref gamepad_axes) are `GLFW_GAMEPAD_AXIS_LEFT_X`,
`GLFW_GAMEPAD_AXIS_LEFT_Y`, `GLFW_GAMEPAD_AXIS_RIGHT_X`,
`GLFW_GAMEPAD_AXIS_RIGHT_Y`, `GLFW_GAMEPAD_AXIS_LEFT_TRIGGER` and
`GLFW_GAMEPAD_AXIS_RIGHT_TRIGGER`.
The `GLFW_GAMEPAD_BUTTON_LAST` and `GLFW_GAMEPAD_AXIS_LAST` constants equal
the largest available index for each array.
@subsection gamepad_mapping Gamepad mappings
GLFW contains a copy of the mappings available in
[SDL_GameControllerDB](https://github.com/gabomdq/SDL_GameControllerDB) at the
time of release. Newer ones can be added at runtime with @ref
glfwUpdateGamepadMappings.
@code
const char* mappings = load_file_contents("game/data/gamecontrollerdb.txt");
glfwUpdateGamepadMappings(mappings);
@endcode
This function supports everything from single lines up to and including the
unmodified contents of the whole `gamecontrollerdb.txt` file.
Below is a description of the mapping format. Please keep in mind that __this
description is not authoritative__. The format is defined by the SDL and
SDL_GameControllerDB projects and their documentation and code takes precedence.
Each mapping is a single line of comma-separated values describing the GUID,
name and layout of the gamepad. Lines that do not begin with a hexadecimal
digit are ignored.
The first value is always the gamepad GUID, a 32 character long hexadecimal
string that typically identifies its make, model, revision and the type of
connection to the computer. When this information is not available, the GUID is
generated using the gamepad name. GLFW uses the SDL 2.0.5+ GUID format but can
convert from the older formats.
The second value is always the human-readable name of the gamepad.
All subsequent values are in the form `<field>:<value>` and describe the layout
of the mapping. These fields may not all be present and may occur in any order.
The button fields are `a`, `b`, `c`, `d`, `back`, `start`, `guide`, `dpup`,
`dpright`, `dpdown`, `dpleft`, `leftshoulder`, `rightshoulder`, `leftstick` and
`rightstick`.
The axis fields are `leftx`, `lefty`, `rightx`, `righty`, `lefttrigger` and
`righttrigger`.
The value of an axis or button field can be a joystick button, a joystick axis,
a hat bitmask or empty. Joystick buttons are specified as `bN`, for example
`b2` for the third button. Joystick axes are specified as `aN`, for example
`a7` for the eighth axis.  Joystick hat bit masks are specified as `hN.N`, for
example `h0.8` for left on the first hat. More than one bit may be set in the
mask.
Before an axis there may be a `+` or `-` range modifier, for example `+a3` for
the positive half of the fourth axis. This restricts input to only the positive
or negative halves of the joystick axis. After an axis or half-axis there may
be the `~` inversion modifier, for example `a2~` or `-a7~`. This negates the
values of the gamepad axis.
The hat bit masks match the [hat states](@ref hat_state) in the joystick
functions.
There is also the special `platform` field that specifies which platform the
mapping is valid for. Possible values are `Windows`, `Mac OS X` and `Linux`.
Below is an example of what a gamepad mapping might look like. It is the
one built into GLFW for Xbox controllers accessed via the XInput API on Windows.
This example has been broken into several lines to fit on the page, but real
gamepad mappings must be a single line.
@code{.unparsed}
78696e70757401000000000000000000,XInput Gamepad (GLFW),platform:Windows,a:b0,
b:b1,x:b2,y:b3,leftshoulder:b4,rightshoulder:b5,back:b6,start:b7,leftstick:b8,
rightstick:b9,leftx:a0,lefty:a1,rightx:a2,righty:a3,lefttrigger:a4,
righttrigger:a5,dpup:h0.1,dpright:h0.2,dpdown:h0.4,dpleft:h0.8,
@endcode
@note GLFW does not yet support the output range and modifiers `+` and `-` that
were recently added to SDL. The input modifiers `+`, `-` and `~` are supported
and described above.
@section time Time input
GLFW provides high-resolution time input, in seconds, with @ref glfwGetTime.
@code
double seconds = glfwGetTime();
@endcode
It returns the number of seconds since the library was initialized with @ref
glfwInit. The platform-specific time sources used typically have micro- or
nanosecond resolution.
You can modify the base time with @ref glfwSetTime.
@code
glfwSetTime(4.0);
@endcode
This sets the time to the specified time, in seconds, and it continues to count
from there.
You can also access the raw timer used to implement the functions above,
with @ref glfwGetTimerValue.
@code
uint64_t value = glfwGetTimerValue();
@endcode
This value is in 1 / frequency seconds. The frequency of the raw
timer varies depending on the operating system and hardware. You can query the
frequency, in Hz, with @ref glfwGetTimerFrequency.
@code
uint64_t frequency = glfwGetTimerFrequency();
@endcode
@section clipboard Clipboard input and output
If the system clipboard contains a UTF-8 encoded string or if it can be
converted to one, you can retrieve it with @ref glfwGetClipboardString. See the
reference documentation for the lifetime of the returned string.
@code
const char* text = glfwGetClipboardString(NULL);
if (text)
{
insert_text(text);
}
@endcode
If the clipboard is empty or if its contents could not be converted, `NULL` is
returned.
The contents of the system clipboard can be set to a UTF-8 encoded string with
@ref glfwSetClipboardString.
@code
glfwSetClipboardString(NULL, "A string with words in it");
@endcode
@section path_drop Path drop input
If you wish to receive the paths of files and/or directories dropped on
a window, set a file drop callback.
@code
glfwSetDropCallback(window, drop_callback);
@endcode
The callback function receives an array of paths encoded as UTF-8.
@code
void drop_callback(GLFWwindow* window, int count, const char** paths)
{
int i;
for (i = 0; i < count; i++)
handle_dropped_file(paths[i]);
}
@endcode
The path array and its strings are only valid until the file drop callback
returns, as they may have been generated specifically for that event. You need
to make a deep copy of the array if you want to keep the paths.
*/
| {
"pile_set_name": "Github"
} |
# Homebrew formula for SyntaxErl, a syntax checker for Erlang code and
# config files (https://github.com/ten0s/syntaxerl).
class Syntaxerl < Formula
homepage "https://github.com/ten0s/syntaxerl"
desc "Syntax checker for Erlang code and config files"
url "https://github.com/ten0s/syntaxerl/archive/0.8.0.tar.gz"
sha256 "4c5a6390a7d28869bf44cc831ae59cb69480a8481853de8dd59672b7cf5bff7b"
# Pre-built binary bottles for the macOS releases current at packaging time.
bottle do
cellar :any
sha256 "91e1dbffa26469b1738f3f7ed5229e1295a2f743c34160f1883b544675c080d2" => :yosemite
sha256 "52967a2ac507a5c7be860c58d0636c4a890f4c6fa0b95bf0bebb533bcf595f39" => :mavericks
sha256 "8416da42bea9d0701e0fa0a9f83865f16a0ba732a0168e646878fbe032af0dc7" => :mountain_lion
end
# Erlang is required both to build the escript and to run it.
depends_on "erlang"
def install
system "make"
# The build produces a single self-contained `syntaxerl` escript.
bin.install "syntaxerl"
end
test do
# A valid config file should check cleanly with no output.
(testpath/"app.config").write "[{app,[{arg1,1},{arg2,2}]}]."
assert_equal "", shell_output("#{bin}/syntaxerl #{testpath}/app.config")
# An invalid config file should report a syntax error with line info.
(testpath/"invalid.config").write "]["
assert_match /invalid.config:1: syntax error before: '\]'/, shell_output("#{bin}/syntaxerl #{testpath}/invalid.config")
end
end
| {
"pile_set_name": "Github"
} |
<?php
/*
* This file is part of the Symfony package.
*
* (c) Fabien Potencier <[email protected]>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
namespace Symfony\Bundle\FrameworkBundle\Routing;
use Symfony\Bundle\FrameworkBundle\Controller\ControllerNameParser;
use Symfony\Component\Config\Loader\DelegatingLoader as BaseDelegatingLoader;
use Symfony\Component\Config\Loader\LoaderResolverInterface;
use Psr\Log\LoggerInterface;
use Symfony\Component\Routing\RouteCollection;
/**
* DelegatingLoader delegates route loading to other loaders using a loader resolver.
*
* This implementation resolves the _controller attribute from the short notation
* to the fully-qualified form (from a:b:c to class:method).
*
* @author Fabien Potencier <[email protected]>
*/
class DelegatingLoader extends BaseDelegatingLoader
{
    protected $parser;
    protected $logger;

    /**
     * Constructor.
     *
     * @param ControllerNameParser    $parser   A ControllerNameParser instance
     * @param LoggerInterface         $logger   A LoggerInterface instance
     * @param LoaderResolverInterface $resolver A LoaderResolverInterface instance
     */
    public function __construct(ControllerNameParser $parser, LoggerInterface $logger = null, LoaderResolverInterface $resolver)
    {
        $this->parser = $parser;
        $this->logger = $logger;

        parent::__construct($resolver);
    }

    /**
     * Loads a resource, expanding short _controller notations (a:b:c) into
     * the fully-qualified class::method form where possible.
     *
     * @param mixed  $resource A resource
     * @param string $type     The resource type
     *
     * @return RouteCollection A RouteCollection instance
     */
    public function load($resource, $type = null)
    {
        $routes = parent::load($resource, $type);

        foreach ($routes->all() as $route) {
            $controller = $route->getDefault('_controller');
            if ($controller) {
                try {
                    $controller = $this->parser->parse($controller);
                } catch (\Exception $e) {
                    // unable to optimize unknown notation
                }

                $route->setDefault('_controller', $controller);
            }
        }

        return $routes;
    }
}
| {
"pile_set_name": "Github"
} |
var baseCreate = require('./baseCreate');
var baseLodash = require('./baseLodash');

/**
 * The base constructor for creating `lodash` wrapper objects.
 *
 * @private
 * @param {*} value The value to wrap.
 * @param {boolean} [chainAll] Enable chaining for all wrapper methods.
 * @param {Array} [actions=[]] Actions to perform to resolve the unwrapped value.
 */
function LodashWrapper(value, chainAll, actions) {
  // `!!` coerces the optional flag to a strict boolean.
  this.__chain__ = !!chainAll;
  // Falsy `actions` falls back to a fresh empty queue.
  this.__actions__ = actions || [];
  this.__wrapped__ = value;
}

// Inherit from the shared lodash prototype so wrapper methods are available,
// then restore the constructor reference clobbered by baseCreate.
LodashWrapper.prototype = baseCreate(baseLodash.prototype);
LodashWrapper.prototype.constructor = LodashWrapper;

module.exports = LodashWrapper;
| {
"pile_set_name": "Github"
} |
#!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Scans the Chromium source of UseCounter, formats the Feature enum for
histograms.xml and merges it. This script can also generate a python code
snippet to put in uma.py of Chromium Dashboard. Make sure that you review the
output for correctness.
"""
import optparse
import os
import sys
from update_histogram_enum import ReadHistogramValues
from update_histogram_enum import UpdateHistogramEnum
def PrintEnumForDashboard(enum_dict):
  """Prints enum entries formatted for use in uma.py of Chromium dashboard.

  Args:
    enum_dict: mapping from integer enum value to feature-name string.
  """
  # sorted(dict) iterates the keys in ascending order on both Python 2 and
  # Python 3; dict.iterkeys() no longer exists on Python 3.
  for key in sorted(enum_dict):
    # The parenthesized form is a valid print statement on Python 2 and a
    # print() call on Python 3, keeping the script runnable under both.
    print(' %d: \'%s\',' % (key, enum_dict[key]))
if __name__ == '__main__':
  parser = optparse.OptionParser()
  parser.add_option('--for-dashboard', action='store_true', dest='dashboard',
                    default=False,
                    help='Print enum definition formatted for use in uma.py of '
                         'Chromium dashboard developed at '
                         'https://github.com/GoogleChrome/chromium-dashboard')
  options, args = parser.parse_args()

  # Location of the UseCounter Feature enum in the Chromium source tree, and
  # the regex markers delimiting the enum body within that header.
  source_path = 'third_party/WebKit/Source/core/frame/UseCounter.h'
  START_MARKER = '^enum Feature {'
  END_MARKER = '^NumberOfFeatures'

  if options.dashboard:
    # Emit a Python dict snippet for the Chromium dashboard's uma.py.
    enum_dict = ReadHistogramValues(source_path, START_MARKER, END_MARKER)
    PrintEnumForDashboard(enum_dict)
  else:
    # Default mode: merge the enum into histograms.xml.
    UpdateHistogramEnum(
        histogram_enum_name='FeatureObserver',
        source_enum_path=source_path,
        start_marker=START_MARKER,
        end_marker=END_MARKER)
| {
"pile_set_name": "Github"
} |
/**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*
* @format
* @emails oncall+react_native
* @flow
*/
'use strict';
const React = require('react');
const ShallowRenderer = require('react-test-renderer/shallow');
const TestRenderer = require('react-test-renderer');
/* $FlowFixMe(>=0.122.0 site=react_native_fb) This comment suppresses an error
* found when Flow v0.122.0 was deployed. To see the error, delete this comment
* and run Flow. */
const renderer = new ShallowRenderer();
export const shallow = (Component: React.Element<any>): any => {
const Wrapper = (): React.Element<any> => Component;
return renderer.render(<Wrapper />);
};
export const shallowRender = (Component: React.Element<any>): any => {
return renderer.render(Component);
};
export const create = (Component: React.Element<any>): any => {
return TestRenderer.create(Component);
};
| {
"pile_set_name": "Github"
} |
extends /templates/core/modal-base-flat.jade
//- Signup/login modal: path-colored header, per-screen body, and footer
//- with a sign-in link. NOTE(review): indentation appears mangled in this
//- dump; structure below is kept byte-for-byte.
block modal-header
//-
This allows for the header color to switch without the subview templates
needing to contain the header
.modal-header(class=view.signupState.get('path'))
span.glyphicon.glyphicon-remove.button.close(data-dismiss="modal", aria-hidden="true")
+modal-header-content
//- Renders the modal title keyed off the signup path
//- (student / teacher / individual / default).
mixin modal-header-content
h3
case view.signupState.get('path')
when 'student'
span(data-i18n="signup.create_student_header")
when 'teacher'
span(data-i18n="signup.create_teacher_header")
when 'individual'
if view.signupState.get('subModalContinue')
span(data-i18n="signup.complete_subscription")
else
span(data-i18n="signup.create_individual_header")
default
span(data-i18n="login.sign_up")
//-
This is where the subviews (screens) are hooked up.
Most subview templates have a .modal-body at their root, but this is inconsistent and needs organization.
block modal-body
case view.signupState.get('screen')
when 'choose-account-type'
#choose-account-type-view
when 'segment-check'
#segment-check-view
when 'eu-confirmation'
#eu-confirmation-view
when 'basic-info'
#basic-info-view
when 'coppa-deny'
#coppa-deny-view
when 'sso-already-exists'
#single-sign-on-already-exists-view
when 'sso-confirm'
#single-sign-on-confirm-view
when 'extras'
#extras-view
when 'confirmation'
#confirmation-view
when 'teacher-signup-component'
#teacher-signup-component
block modal-footer
//-
This allows for the footer color to switch without the subview templates
needing to contain the footer
.modal-footer(class=view.signupState.get('path'))
+modal-footer-content
//- Footer shows the "already have an account?" sign-in link on every
//- screen except the final confirmation screen.
mixin modal-footer-content
if view.signupState.get('screen') !== 'confirmation'
.modal-footer-content
.small-details.rtl-allowed
span.spr(data-i18n="signup.login_switch")
a.login-link
span(data-i18n="signup.sign_in")
"pile_set_name": "Github"
} |
(* Reverses a list.
   Tail-recursive with an accumulator: O(n) time and constant stack,
   unlike the naive [(reverse tail) @ [head]] formulation, which is
   O(n^2) because of the repeated appends and can overflow the stack
   on long lists. *)
let reverse l =
  let rec aux acc = function
    | [] -> acc
    | head :: tail -> aux (head :: acc) tail
  in
  aux [] l
;;
reverse [0;1;2;3];; | {
"pile_set_name": "Github"
} |
/// @ref gtx_normal
/// @file glm/gtx/normal.inl
namespace glm
{
// Returns the unit-length normal of the triangle (p1, p2, p3), computed as
// the normalized cross product of the two edges emanating from p1.
// The facing direction of the result depends on the winding order of the
// vertices as passed by the caller.
template <typename T, precision P>
GLM_FUNC_QUALIFIER tvec3<T, P> triangleNormal
(
tvec3<T, P> const & p1,
tvec3<T, P> const & p2,
tvec3<T, P> const & p3
)
{
return normalize(cross(p1 - p2, p1 - p3));
}
}//namespace glm
"pile_set_name": "Github"
} |
// Copyright (c) 2009 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef CHROME_BROWSER_RESOURCES_UTIL_H_
#define CHROME_BROWSER_RESOURCES_UTIL_H_

#include <string>

#include "base/macros.h"

// Static-only helper for mapping theme resource names to resource ids.
class ResourcesUtil {
 public:
  // Returns the theme resource id or -1 if no resource with the name exists.
  static int GetThemeResourceId(const std::string& resource_name);

 private:
  // Not instantiable: the class only exposes static helpers.
  ResourcesUtil() {}
  DISALLOW_COPY_AND_ASSIGN(ResourcesUtil);
};

#endif  // CHROME_BROWSER_RESOURCES_UTIL_H_
| {
"pile_set_name": "Github"
} |
//////////////////////////////////////////////////////////////////////////////
//
// (C) Copyright Ion Gaztanaga 2015-2015. Distributed under the Boost
// Software License, Version 1.0. (See accompanying file
// LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
// See http://www.boost.org/libs/container for documentation.
//
//////////////////////////////////////////////////////////////////////////////

#ifndef BOOST_CONTAINER_PMR_SET_HPP
#define BOOST_CONTAINER_PMR_SET_HPP

#if defined (_MSC_VER)
#  pragma once
#endif

#include <boost/container/set.hpp>
#include <boost/container/pmr/polymorphic_allocator.hpp>

namespace boost {
namespace container {
namespace pmr {

#if !defined(BOOST_NO_CXX11_TEMPLATE_ALIASES)

//! Alias template of boost::container::set that allocates through a
//! polymorphic_allocator (only available when the compiler supports
//! C++11 template aliases).
template <class Key
         ,class Compare = std::less<Key>
         ,class Options = tree_assoc_defaults >
using set = boost::container::set<Key, Compare, polymorphic_allocator<Key>, Options>;

//! Alias template of boost::container::multiset that allocates through a
//! polymorphic_allocator.
template <class Key
         ,class Compare = std::less<Key>
         ,class Options = tree_assoc_defaults >
using multiset = boost::container::multiset<Key, Compare, polymorphic_allocator<Key>, Options>;

#endif

//! A portable metafunction to obtain a set
//! that uses a polymorphic allocator
template <class Key
         ,class Compare = std::less<Key>
         ,class Options = tree_assoc_defaults >
struct set_of
{
   typedef boost::container::set<Key, Compare, polymorphic_allocator<Key>, Options> type;
};

//! A portable metafunction to obtain a multiset
//! that uses a polymorphic allocator
template <class Key
         ,class Compare = std::less<Key>
         ,class Options = tree_assoc_defaults >
struct multiset_of
{
   typedef boost::container::multiset<Key, Compare, polymorphic_allocator<Key>, Options> type;
};

} //namespace pmr {
} //namespace container {
} //namespace boost {

#endif //BOOST_CONTAINER_PMR_SET_HPP
| {
"pile_set_name": "Github"
} |
<html>
<body>
<!-- Shell page: all behavior lives in the referenced script
     (presumably DevTools bootstrap code - confirm against the script). -->
<script type="text/javascript" src="devtools_init_page.js"></script>
</body>
</html>
/*
* Copyright © 2008 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Eric Anholt <[email protected]>
* Kenneth Graunke <[email protected]>
*/
/** @file gen6_queryobj.c
*
* Support for query objects (GL_ARB_occlusion_query, GL_ARB_timer_query,
* GL_EXT_transform_feedback, and friends) on platforms that support
* hardware contexts (Gen6+).
*/
#include "main/imports.h"
#include "brw_context.h"
#include "brw_defines.h"
#include "brw_state.h"
#include "perf/gen_perf_regs.h"
#include "intel_batchbuffer.h"
#include "intel_buffer_objects.h"
/**
 * Write the "result available" flag for a query, for the benefit of
 * ARB_query_buffer_object.
 *
 * The flag is a 64-bit immediate stored at offset 2 * sizeof(uint64_t) in
 * the query BO (just past the begin/end counter snapshot slots).  For
 * non-pipelined queries this is a no-op.
 */
static inline void
set_query_availability(struct brw_context *brw, struct brw_query_object *query,
                       bool available)
{
   /* For platforms that support ARB_query_buffer_object, we write the
    * query availability for "pipelined" queries.
    *
    * Most counter snapshots are written by the command streamer, by
    * doing a CS stall and then MI_STORE_REGISTER_MEM.  For these
    * counters, the CS stall guarantees that the results will be
    * available when subsequent CS commands run.  So we don't need to
    * do any additional tracking.
    *
    * Other counters (occlusion queries and timestamp) are written by
    * PIPE_CONTROL, without a CS stall.  This means that we can't be
    * sure whether the writes have landed yet or not.  Performing a
    * PIPE_CONTROL with an immediate write will synchronize with
    * those earlier writes, so we write 1 when the value has landed.
    */
   if (brw->ctx.Extensions.ARB_query_buffer_object &&
       brw_is_query_pipelined(query)) {
      unsigned flags = PIPE_CONTROL_WRITE_IMMEDIATE;

      if (available) {
         /* Order available *after* the query results. */
         flags |= PIPE_CONTROL_FLUSH_ENABLE;
      } else {
         /* Make it unavailable *before* any pipelined reads. */
         flags |= PIPE_CONTROL_CS_STALL;
      }

      brw_emit_pipe_control_write(brw, flags,
                                  query->bo, 2 * sizeof(uint64_t),
                                  available);
   }
}
static void
write_primitives_generated(struct brw_context *brw,
struct brw_bo *query_bo, int stream, int idx)
{
const struct gen_device_info *devinfo = &brw->screen->devinfo;
brw_emit_mi_flush(brw);
if (devinfo->gen >= 7 && stream > 0) {
brw_store_register_mem64(brw, query_bo,
GEN7_SO_PRIM_STORAGE_NEEDED(stream),
idx * sizeof(uint64_t));
} else {
brw_store_register_mem64(brw, query_bo, CL_INVOCATION_COUNT,
idx * sizeof(uint64_t));
}
}
static void
write_xfb_primitives_written(struct brw_context *brw,
struct brw_bo *bo, int stream, int idx)
{
const struct gen_device_info *devinfo = &brw->screen->devinfo;
brw_emit_mi_flush(brw);
if (devinfo->gen >= 7) {
brw_store_register_mem64(brw, bo, GEN7_SO_NUM_PRIMS_WRITTEN(stream),
idx * sizeof(uint64_t));
} else {
brw_store_register_mem64(brw, bo, GEN6_SO_NUM_PRIMS_WRITTEN,
idx * sizeof(uint64_t));
}
}
/**
 * Snapshot the counters needed for transform-feedback overflow queries.
 *
 * For each of @count streams starting at @stream, store the
 * "primitive storage needed" and "primitives written" counters.
 * @idx selects the begin (0) or end (1) snapshot.  Each stream occupies
 * four consecutive uint64 slots, in the layout that
 * check_xfb_overflow_streams() later consumes.
 */
static void
write_xfb_overflow_streams(struct gl_context *ctx,
                           struct brw_bo *bo, int stream, int count,
                           int idx)
{
   struct brw_context *brw = brw_context(ctx);
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   brw_emit_mi_flush(brw);

   for (int i = 0; i < count; i++) {
      int w_idx = 4 * i + idx;       /* "storage needed" slot (begin/end) */
      int g_idx = 4 * i + idx + 2;   /* "primitives written" slot (begin/end) */

      if (devinfo->gen >= 7) {
         brw_store_register_mem64(brw, bo,
                                  GEN7_SO_NUM_PRIMS_WRITTEN(stream + i),
                                  g_idx * sizeof(uint64_t));
         brw_store_register_mem64(brw, bo,
                                  GEN7_SO_PRIM_STORAGE_NEEDED(stream + i),
                                  w_idx * sizeof(uint64_t));
      } else {
         /* Gen6 exposes only a single, non-per-stream pair of SO counters. */
         brw_store_register_mem64(brw, bo,
                                  GEN6_SO_NUM_PRIMS_WRITTEN,
                                  g_idx * sizeof(uint64_t));
         brw_store_register_mem64(brw, bo,
                                  GEN6_SO_PRIM_STORAGE_NEEDED,
                                  w_idx * sizeof(uint64_t));
      }
   }
}
/* Decide whether any of @count streams overflowed its transform-feedback
 * buffer.  Each stream occupies four uint64 slots, written by
 * write_xfb_overflow_streams(): storage-needed at begin/end, then
 * primitives-written at begin/end.  A stream overflowed when the number
 * of primitives that needed storage differs from the number actually
 * written.
 */
static bool
check_xfb_overflow_streams(uint64_t *results, int count)
{
   for (int i = 0; i < count; i++) {
      const uint64_t *r = &results[4 * i];
      const uint64_t needed = r[1] - r[0];
      const uint64_t written = r[3] - r[2];

      if (written != needed)
         return true;
   }
   return false;
}
/* Map a pipeline-statistics query target onto its slot in the
 * target_to_register[] table below.  GEOMETRY_SHADER_INVOCATIONS sits at
 * the end of the table; all other targets map by their offset from
 * GL_VERTICES_SUBMITTED_ARB.
 */
static inline int
pipeline_target_to_index(int target)
{
   return (target == GL_GEOMETRY_SHADER_INVOCATIONS)
      ? MAX_PIPELINE_STATISTICS - 1
      : target - GL_VERTICES_SUBMITTED_ARB;
}
/**
 * Snapshot the pipeline-statistics register for @target into 64-bit slot
 * @idx of @bo.
 */
static void
emit_pipeline_stat(struct brw_context *brw, struct brw_bo *bo,
                   int stream, int target, int idx)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   /* One source of confusion is the tessellation shader statistics. The
    * hardware has no statistics specific to the TE unit. Ideally we could have
    * the HS primitives for TESS_CONTROL_SHADER_PATCHES_ARB, and the DS
    * invocations as the register for TESS_CONTROL_SHADER_PATCHES_ARB.
    * Unfortunately we don't have HS primitives, we only have HS invocations.
    */

   /* Everything except GEOMETRY_SHADER_INVOCATIONS can be kept in a simple
    * lookup table
    */
   static const uint32_t target_to_register[] = {
      IA_VERTICES_COUNT,   /* VERTICES_SUBMITTED */
      IA_PRIMITIVES_COUNT, /* PRIMITIVES_SUBMITTED */
      VS_INVOCATION_COUNT, /* VERTEX_SHADER_INVOCATIONS */
      HS_INVOCATION_COUNT, /* TESS_CONTROL_SHADER_PATCHES */
      DS_INVOCATION_COUNT, /* TESS_EVALUATION_SHADER_INVOCATIONS */
      GS_PRIMITIVES_COUNT, /* GEOMETRY_SHADER_PRIMITIVES_EMITTED */
      PS_INVOCATION_COUNT, /* FRAGMENT_SHADER_INVOCATIONS */
      CS_INVOCATION_COUNT, /* COMPUTE_SHADER_INVOCATIONS */
      CL_INVOCATION_COUNT, /* CLIPPING_INPUT_PRIMITIVES */
      CL_PRIMITIVES_COUNT, /* CLIPPING_OUTPUT_PRIMITIVES */
      GS_INVOCATION_COUNT /* This one is special... */
   };
   STATIC_ASSERT(ARRAY_SIZE(target_to_register) == MAX_PIPELINE_STATISTICS);
   uint32_t reg = target_to_register[pipeline_target_to_index(target)];

   /* Gen6 GS code counts full primitives, that is, it won't count individual
    * triangles in a triangle strip. Use CL_INVOCATION_COUNT for that.
    */
   if (devinfo->gen == 6 && target == GL_GEOMETRY_SHADER_PRIMITIVES_EMITTED_ARB)
      reg = CL_INVOCATION_COUNT;

   assert(reg != 0);

   /* Emit a flush to make sure various parts of the pipeline are complete and
    * we get an accurate value
    */
   brw_emit_mi_flush(brw);

   brw_store_register_mem64(brw, bo, reg, idx * sizeof(uint64_t));
}
/**
 * Wait on the query object's BO and calculate the final result.
 *
 * Reads back the begin/end snapshots written by gen6_begin_query() /
 * gen6_end_query(), computes query->Base.Result, then releases the BO
 * and marks the query ready.
 */
static void
gen6_queryobj_get_results(struct gl_context *ctx,
                          struct brw_query_object *query)
{
   struct brw_context *brw = brw_context(ctx);
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   if (query->bo == NULL)
      return;

   /* NOTE(review): assumes mapping for read blocks until the GPU's pending
    * writes to this BO have landed -- confirm against the bufmgr contract.
    */
   uint64_t *results = brw_bo_map(brw, query->bo, MAP_READ);
   switch (query->Base.Target) {
   case GL_TIME_ELAPSED:
      /* The query BO contains the starting and ending timestamps.
       * Subtract the two and convert to nanoseconds.
       */
      query->Base.Result = brw_raw_timestamp_delta(brw, results[0], results[1]);
      query->Base.Result = gen_device_info_timebase_scale(devinfo, query->Base.Result);
      break;

   case GL_TIMESTAMP:
      /* The query BO contains a single timestamp value in results[0]. */
      query->Base.Result = gen_device_info_timebase_scale(devinfo, results[0]);

      /* Ensure the scaled timestamp overflows according to
       * GL_QUERY_COUNTER_BITS
       */
      query->Base.Result &= (1ull << ctx->Const.QueryCounterBits.Timestamp) - 1;
      break;

   case GL_SAMPLES_PASSED_ARB:
      /* We need to use += rather than = here since some BLT-based operations
       * may have added additional samples to our occlusion query value.
       */
      query->Base.Result += results[1] - results[0];
      break;

   case GL_ANY_SAMPLES_PASSED:
   case GL_ANY_SAMPLES_PASSED_CONSERVATIVE:
      /* A change in the depth-pass count means at least one sample passed. */
      if (results[0] != results[1])
         query->Base.Result = true;
      break;

   case GL_PRIMITIVES_GENERATED:
   case GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN:
   case GL_VERTICES_SUBMITTED_ARB:
   case GL_PRIMITIVES_SUBMITTED_ARB:
   case GL_VERTEX_SHADER_INVOCATIONS_ARB:
   case GL_GEOMETRY_SHADER_INVOCATIONS:
   case GL_GEOMETRY_SHADER_PRIMITIVES_EMITTED_ARB:
   case GL_CLIPPING_INPUT_PRIMITIVES_ARB:
   case GL_CLIPPING_OUTPUT_PRIMITIVES_ARB:
   case GL_COMPUTE_SHADER_INVOCATIONS_ARB:
   case GL_TESS_CONTROL_SHADER_PATCHES_ARB:
   case GL_TESS_EVALUATION_SHADER_INVOCATIONS_ARB:
      /* Simple counter queries: end snapshot minus begin snapshot. */
      query->Base.Result = results[1] - results[0];
      break;

   case GL_TRANSFORM_FEEDBACK_STREAM_OVERFLOW_ARB:
      query->Base.Result = check_xfb_overflow_streams(results, 1);
      break;

   case GL_TRANSFORM_FEEDBACK_OVERFLOW_ARB:
      query->Base.Result = check_xfb_overflow_streams(results, MAX_VERTEX_STREAMS);
      break;

   case GL_FRAGMENT_SHADER_INVOCATIONS_ARB:
      query->Base.Result = (results[1] - results[0]);
      /* Implement the "WaDividePSInvocationCountBy4:HSW,BDW" workaround:
       * "Invocation counter is 4 times actual. WA: SW to divide HW reported
       * PS Invocations value by 4."
       *
       * Prior to Haswell, invocation count was counted by the WM, and it
       * buggily counted invocations in units of subspans (2x2 unit). To get the
       * correct value, the CS multiplied this by 4. With HSW the logic moved,
       * and correctly emitted the number of pixel shader invocations, but,
       * whomever forgot to undo the multiply by 4.
       */
      if (devinfo->gen == 8 || devinfo->is_haswell)
         query->Base.Result /= 4;
      break;

   default:
      unreachable("Unrecognized query target in brw_queryobj_get_results()");
   }
   brw_bo_unmap(query->bo);

   /* Now that we've processed the data stored in the query's buffer object,
    * we can release it.
    */
   brw_bo_unreference(query->bo);
   query->bo = NULL;

   query->Base.Ready = true;
}
/**
 * Driver hook for glBeginQuery().
 *
 * Initializes driver structures and emits any GPU commands required to begin
 * recording data for the query.  The begin snapshot always lands in 64-bit
 * slot 0 of the query BO; gen6_end_query() writes slot 1.
 */
static void
gen6_begin_query(struct gl_context *ctx, struct gl_query_object *q)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_query_object *query = (struct brw_query_object *)q;

   /* Since we're starting a new query, we need to throw away old results. */
   brw_bo_unreference(query->bo);
   query->bo =
      brw_bo_alloc(brw->bufmgr, "query results", 4096, BRW_MEMZONE_OTHER);

   /* For ARB_query_buffer_object: The result is not available */
   set_query_availability(brw, query, false);

   switch (query->Base.Target) {
   case GL_TIME_ELAPSED:
      /* For timestamp queries, we record the starting time right away so that
       * we measure the full time between BeginQuery and EndQuery.  There's
       * some debate about whether this is the right thing to do.  Our decision
       * is based on the following text from the ARB_timer_query extension:
       *
       * "(5) Should the extension measure total time elapsed between the full
       *      completion of the BeginQuery and EndQuery commands, or just time
       *      spent in the graphics library?
       *
       *  RESOLVED:  This extension will measure the total time elapsed
       *  between the full completion of these commands.  Future extensions
       *  may implement a query to determine time elapsed at different stages
       *  of the graphics pipeline."
       *
       * We write a starting timestamp now (at index 0).  At EndQuery() time,
       * we'll write a second timestamp (at index 1), and subtract the two to
       * obtain the time elapsed.  Notably, this includes time elapsed while
       * the system was doing other work, such as running other applications.
       */
      brw_write_timestamp(brw, query->bo, 0);
      break;

   case GL_ANY_SAMPLES_PASSED:
   case GL_ANY_SAMPLES_PASSED_CONSERVATIVE:
   case GL_SAMPLES_PASSED_ARB:
      /* Occlusion queries: snapshot the starting depth-pass count. */
      brw_write_depth_count(brw, query->bo, 0);
      break;

   case GL_PRIMITIVES_GENERATED:
      write_primitives_generated(brw, query->bo, query->Base.Stream, 0);
      /* Mark rasterizer-discard-related state dirty for stream-0 queries. */
      if (query->Base.Stream == 0)
         ctx->NewDriverState |= BRW_NEW_RASTERIZER_DISCARD;
      break;

   case GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN:
      write_xfb_primitives_written(brw, query->bo, query->Base.Stream, 0);
      break;

   case GL_TRANSFORM_FEEDBACK_STREAM_OVERFLOW_ARB:
      write_xfb_overflow_streams(ctx, query->bo, query->Base.Stream, 1, 0);
      break;

   case GL_TRANSFORM_FEEDBACK_OVERFLOW_ARB:
      write_xfb_overflow_streams(ctx, query->bo, 0, MAX_VERTEX_STREAMS, 0);
      break;

   case GL_VERTICES_SUBMITTED_ARB:
   case GL_PRIMITIVES_SUBMITTED_ARB:
   case GL_VERTEX_SHADER_INVOCATIONS_ARB:
   case GL_GEOMETRY_SHADER_INVOCATIONS:
   case GL_GEOMETRY_SHADER_PRIMITIVES_EMITTED_ARB:
   case GL_FRAGMENT_SHADER_INVOCATIONS_ARB:
   case GL_CLIPPING_INPUT_PRIMITIVES_ARB:
   case GL_CLIPPING_OUTPUT_PRIMITIVES_ARB:
   case GL_COMPUTE_SHADER_INVOCATIONS_ARB:
   case GL_TESS_CONTROL_SHADER_PATCHES_ARB:
   case GL_TESS_EVALUATION_SHADER_INVOCATIONS_ARB:
      /* Pipeline statistics: snapshot the starting counter value. */
      emit_pipeline_stat(brw, query->bo, query->Base.Stream, query->Base.Target, 0);
      break;

   default:
      unreachable("Unrecognized query target in brw_begin_query()");
   }
}
/**
 * Driver hook for glEndQuery().
 *
 * Emits GPU commands to record a final query value, ending any data capturing.
 * However, the final result isn't necessarily available until the GPU processes
 * those commands.  brw_queryobj_get_results() processes the captured data to
 * produce the final result.
 */
static void
gen6_end_query(struct gl_context *ctx, struct gl_query_object *q)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_query_object *query = (struct brw_query_object *)q;

   switch (query->Base.Target) {
   case GL_TIME_ELAPSED:
      /* Ending timestamp goes to slot 1; slot 0 was written at begin. */
      brw_write_timestamp(brw, query->bo, 1);
      break;

   case GL_ANY_SAMPLES_PASSED:
   case GL_ANY_SAMPLES_PASSED_CONSERVATIVE:
   case GL_SAMPLES_PASSED_ARB:
      brw_write_depth_count(brw, query->bo, 1);
      break;

   case GL_PRIMITIVES_GENERATED:
      write_primitives_generated(brw, query->bo, query->Base.Stream, 1);
      if (query->Base.Stream == 0)
         ctx->NewDriverState |= BRW_NEW_RASTERIZER_DISCARD;
      break;

   case GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN:
      write_xfb_primitives_written(brw, query->bo, query->Base.Stream, 1);
      break;

   case GL_TRANSFORM_FEEDBACK_STREAM_OVERFLOW_ARB:
      write_xfb_overflow_streams(ctx, query->bo, query->Base.Stream, 1, 1);
      break;

   case GL_TRANSFORM_FEEDBACK_OVERFLOW_ARB:
      write_xfb_overflow_streams(ctx, query->bo, 0, MAX_VERTEX_STREAMS, 1);
      break;

   /* Pipeline statistics: snapshot the ending counter values.  (Any
    * overflow math happens later, when the results are read back in
    * gen6_queryobj_get_results().)
    */
   case GL_VERTICES_SUBMITTED_ARB:
   case GL_PRIMITIVES_SUBMITTED_ARB:
   case GL_VERTEX_SHADER_INVOCATIONS_ARB:
   case GL_GEOMETRY_SHADER_PRIMITIVES_EMITTED_ARB:
   case GL_FRAGMENT_SHADER_INVOCATIONS_ARB:
   case GL_COMPUTE_SHADER_INVOCATIONS_ARB:
   case GL_CLIPPING_INPUT_PRIMITIVES_ARB:
   case GL_CLIPPING_OUTPUT_PRIMITIVES_ARB:
   case GL_GEOMETRY_SHADER_INVOCATIONS:
   case GL_TESS_CONTROL_SHADER_PATCHES_ARB:
   case GL_TESS_EVALUATION_SHADER_INVOCATIONS_ARB:
      emit_pipeline_stat(brw, query->bo,
                         query->Base.Stream, query->Base.Target, 1);
      break;

   default:
      unreachable("Unrecognized query target in brw_end_query()");
   }

   /* The current batch contains the commands to handle EndQuery(),
    * but they won't actually execute until it is flushed.
    */
   query->flushed = false;

   /* For ARB_query_buffer_object: The result is now available */
   set_query_availability(brw, query, true);
}
/**
 * Flush the batch if it still references the query object BO.
 *
 * Tracks (in query->flushed) whether the query's commands have already
 * left the batchbuffer, so the flush is only issued when still needed.
 */
static void
flush_batch_if_needed(struct brw_context *brw, struct brw_query_object *query)
{
   /* If the batch doesn't reference the BO, it must have been flushed
    * (for example, due to being full).  Record that it's been flushed.
    */
   query->flushed = query->flushed ||
                    !brw_batch_references(&brw->batch, query->bo);

   if (!query->flushed)
      intel_batchbuffer_flush(brw);
}
/**
 * The WaitQuery() driver hook.
 *
 * Wait for a query result to become available and return it.  This is the
 * backing for glGetQueryObjectiv() with the GL_QUERY_RESULT pname.
 */
static void gen6_wait_query(struct gl_context *ctx, struct gl_query_object *q)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_query_object *query = (struct brw_query_object *)q;

   /* If the application has requested the query result, but this batch is
    * still contributing to it, flush it now to finish that work so the
    * result will become available (eventually).
    */
   flush_batch_if_needed(brw, query);

   gen6_queryobj_get_results(ctx, query);
}
/**
 * The CheckQuery() driver hook.
 *
 * Checks whether a query result is ready yet.  If not, flushes.
 * This is the backing for glGetQueryObjectiv()'s QUERY_RESULT_AVAILABLE pname.
 */
static void gen6_check_query(struct gl_context *ctx, struct gl_query_object *q)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_query_object *query = (struct brw_query_object *)q;

   /* If query->bo is NULL, we've already gathered the results - this is a
    * redundant CheckQuery call.  Ignore it.
    */
   if (query->bo == NULL)
      return;

   /* From the GL_ARB_occlusion_query spec:
    *
    *     "Instead of allowing for an infinite loop, performing a
    *      QUERY_RESULT_AVAILABLE_ARB will perform a flush if the result is
    *      not ready yet on the first time it is queried.  This ensures that
    *      the async query will return true in finite time."
    */
   flush_batch_if_needed(brw, query);

   /* Only gather the results when the GPU is done with the BO; otherwise
    * leave the query pending so the caller can poll again later.
    */
   if (!brw_bo_busy(query->bo)) {
      gen6_queryobj_get_results(ctx, query);
   }
}
/**
 * The QueryCounter() driver hook.
 *
 * Delegates to brw_query_counter() to record the counter value, then marks
 * the result available for ARB_query_buffer_object consumers.
 */
static void
gen6_query_counter(struct gl_context *ctx, struct gl_query_object *q)
{
   struct brw_context *brw = brw_context(ctx);
   struct brw_query_object *query = (struct brw_query_object *)q;

   brw_query_counter(ctx, q);
   set_query_availability(brw, query, true);
}
/* Initialize Gen6+-specific query object functions.
 *
 * Installs the driver hooks defined above into Mesa's device-driver
 * function table.
 */
void gen6_init_queryobj_functions(struct dd_function_table *functions)
{
   functions->BeginQuery = gen6_begin_query;
   functions->EndQuery = gen6_end_query;
   functions->CheckQuery = gen6_check_query;
   functions->WaitQuery = gen6_wait_query;
   functions->QueryCounter = gen6_query_counter;
}
| {
"pile_set_name": "Github"
} |
config BR2_PACKAGE_PYTHON_PATHVALIDATE
bool "python-pathvalidate"
help
pathvalidate is a python library to validate/sanitize a string
such as filename/variable-name.
https://github.com/thombashi/pathvalidate
| {
"pile_set_name": "Github"
} |
// Copyright Jetstack Ltd. See LICENSE for details.
package cmd

import (
	"os"

	"github.com/spf13/cobra"

	"github.com/jetstack/tarmak/pkg/tarmak/utils"
	"github.com/jetstack/tarmak/pkg/terraform"
)

// terraformPassthrough invokes the terraform wrapper entrypoint f with the
// raw CLI args and a shutdown channel (utils.MakeShutdownCh), so plugin
// clients get closed after the subcommand runs.
func terraformPassthrough(args []string, f func([]string, <-chan struct{}) int) int {
	return f(args, utils.MakeShutdownCh())
}
// Hidden entrypoint for running the in-process terraform plugin
// (see terraform.InternalPlugin).
var internalPluginCmd = &cobra.Command{
	Use: "internal-plugin",
	Run: func(cmd *cobra.Command, args []string) {
		os.Exit(terraform.InternalPlugin(args))
	},
	Hidden:             true,
	DisableFlagParsing: true,
}

// terraformCmd is a hidden parent command.  Each subcommand below disables
// cobra flag parsing and hands its raw args to the matching terraform
// wrapper, exiting the process with that wrapper's return code.
var terraformCmd = &cobra.Command{
	Use:                "terraform",
	Hidden:             true,
	DisableFlagParsing: true,
}

var terraformPlanCmd = &cobra.Command{
	Use: "plan",
	Run: func(cmd *cobra.Command, args []string) {
		os.Exit(terraformPassthrough(args, terraform.Plan))
	},
	Hidden:             true,
	DisableFlagParsing: true,
}

var terraformApplyCmd = &cobra.Command{
	Use: "apply",
	Run: func(cmd *cobra.Command, args []string) {
		os.Exit(terraformPassthrough(args, terraform.Apply))
	},
	Hidden:             true,
	DisableFlagParsing: true,
}

var terraformDestroyCmd = &cobra.Command{
	Use: "destroy",
	Run: func(cmd *cobra.Command, args []string) {
		os.Exit(terraformPassthrough(args, terraform.Destroy))
	},
	Hidden:             true,
	DisableFlagParsing: true,
}

var terraformOutputCmd = &cobra.Command{
	Use: "output",
	Run: func(cmd *cobra.Command, args []string) {
		os.Exit(terraformPassthrough(args, terraform.Output))
	},
	Hidden:             true,
	DisableFlagParsing: true,
}

var terraformInitCmd = &cobra.Command{
	Use: "init",
	Run: func(cmd *cobra.Command, args []string) {
		os.Exit(terraformPassthrough(args, terraform.Init))
	},
	Hidden:             true,
	DisableFlagParsing: true,
}

// Note: "force-unlock" maps onto terraform.Unlock.
var terraformForceUnlockCmd = &cobra.Command{
	Use: "force-unlock",
	Run: func(cmd *cobra.Command, args []string) {
		os.Exit(terraformPassthrough(args, terraform.Unlock))
	},
	Hidden:             true,
	DisableFlagParsing: true,
}

var terraformFmtCmd = &cobra.Command{
	Use: "fmt",
	Run: func(cmd *cobra.Command, args []string) {
		os.Exit(terraformPassthrough(args, terraform.Fmt))
	},
	Hidden:             true,
	DisableFlagParsing: true,
}

var terraformValidateCmd = &cobra.Command{
	Use: "validate",
	Run: func(cmd *cobra.Command, args []string) {
		os.Exit(terraformPassthrough(args, terraform.Validate))
	},
	Hidden:             true,
	DisableFlagParsing: true,
}

var terraformTaintCmd = &cobra.Command{
	Use: "taint",
	Run: func(cmd *cobra.Command, args []string) {
		os.Exit(terraformPassthrough(args, terraform.Taint))
	},
	Hidden:             true,
	DisableFlagParsing: true,
}
func init() {
	RootCmd.AddCommand(internalPluginCmd)

	// Register every terraform passthrough subcommand, then expose the
	// hidden "terraform" parent on the root command.
	terraformCmd.AddCommand(
		terraformInitCmd,
		terraformPlanCmd,
		terraformApplyCmd,
		terraformDestroyCmd,
		terraformForceUnlockCmd,
		terraformOutputCmd,
		terraformFmtCmd,
		terraformValidateCmd,
		terraformTaintCmd,
	)
	RootCmd.AddCommand(terraformCmd)
}
| {
"pile_set_name": "Github"
} |
/* @ngInject */
function blackFridayModalOpener(
    blackFridayModel,
    blackFridayModal,
    subscriptionModel,
    networkActivityTracker,
    dispatchers
) {
    // Per-session cache: { hasData, currency, loading (in-flight promise) }.
    let STATE = {};
    const { on } = dispatchers();

    // Drop the cached offers when the user logs out.
    on('logout', () => {
        STATE = {};
    });

    const openModal = () => {
        blackFridayModal.activate();
    };

    return async () => {
        // A fetch is already in flight: return the same promise so
        // concurrent calls don't start duplicate requests.
        if (STATE.loading) {
            return STATE.loading;
        }

        const currency = subscriptionModel.currency();

        /**
         * This is a bit special to show the spinner before the modal has opened.
         * Offers are (re)fetched when we have none cached or when the
         * subscription currency changed since the last fetch.
         */
        if (!STATE.hasData || currency !== STATE.currency) {
            STATE.hasData = false;
            STATE.currency = currency;
            STATE.loading = blackFridayModel
                .getOffers(currency)
                .then(() => {
                    STATE.hasData = true;
                    STATE.loading = undefined;
                    openModal();
                })
                .catch((e) => {
                    // Reset so the next call retries the fetch.
                    STATE.currency = undefined;
                    STATE.loading = undefined;
                    throw e;
                });
            networkActivityTracker.track(STATE.loading);
            return STATE.loading;
        }

        openModal();
    };
}
export default blackFridayModalOpener;
| {
"pile_set_name": "Github"
} |
#include <iostream>
#include <string>
using namespace std;
/**
 * Compute the Hamming distance between two equal-length strings: the
 * number of positions at which the corresponding characters differ.
 *
 * Strings are taken by const reference to avoid copying both arguments
 * on every call (the previous const-by-value signature copied them);
 * callers are unaffected.
 *
 * @param str1 first string
 * @param str2 second string (must have the same length as str1)
 * @return the Hamming distance, or -1 if the lengths differ
 */
int hammingDistance(const string &str1, const string &str2)
{
    // The distance is only defined for strings of equal length.
    if (str1.length() != str2.length())
        return -1;

    int dist = 0;
    for (string::size_type i = 0; i < str1.length(); i++)
        if (str1[i] != str2[i])
            ++dist;   // characters differ at position i
    return dist;
}
/**
 * Interactive driver: reads two strings from stdin and prints their
 * Hamming distance (-1 when the lengths differ).
 */
int main()
{
    string str1, str2;

    // Read the two strings to compare.
    cout<<"Insert first string: ";
    cin>>str1;
    cout<<"Insert second string (of equal length): ";
    cin>>str2;

    // Print the Hamming distance.
    cout<<"Hamming distance between these two strings (-1 if length is different): "<<hammingDistance(str1, str2)<<endl;
    return 0;
}
| {
"pile_set_name": "Github"
} |
-- Migration: remove the builds_job_id_succeeded_idx index.
-- Wrapped in BEGIN/COMMIT so the drop applies atomically on databases
-- with transactional DDL (e.g. PostgreSQL).
BEGIN;
  DROP INDEX builds_job_id_succeeded_idx;
COMMIT;
| {
"pile_set_name": "Github"
} |
<?xml version='1.0' encoding='utf-8'?>
<section xmlns="https://code.dccouncil.us/schemas/dc-library" xmlns:codified="https://code.dccouncil.us/schemas/codified" xmlns:codify="https://code.dccouncil.us/schemas/codify" xmlns:xi="http://www.w3.org/2001/XInclude" containing-doc="D.C. Code">
<num>2-1528</num>
<reason>Repealed</reason>
<heading>Biannual reports.</heading>
<text>Repealed.</text>
<annotations>
<annotation doc="D.C. Law 7-227" type="History" path="§9">Mar. 16, 1989, D.C. Law 7-227, § 9, 36 DCR 607</annotation>
<annotation doc="D.C. Law 12-86" type="History">Apr. 29, 1998, D.C. Law 12-86, § 401, 45 DCR 1172</annotation>
<annotation type="Prior Codifications">1981 Ed., § 1-2628.</annotation>
</annotations>
</section>
| {
"pile_set_name": "Github"
} |
# Ruby version the site is built with.
ruby '2.3.1'

source 'https://rubygems.org' do
  gem 'jekyll'        # static-site generator
  gem 'semantic'      # semantic-version parsing
  gem 'html-proofer'  # validates the generated HTML (links, images, ...)
end
| {
"pile_set_name": "Github"
} |
/*!
 * Pagination handling.
 *
 * @since: 1.0.0
 * @author Way Lau <https://waylau.com>
 */
(function($) {
    "use strict";

    /**
     * Wire up pagination events under `selector`.
     *
     * handler(pageIndex, pageSize): pageIndex is the zero-based index of
     * the selected page; pageSize is the currently selected page size
     * (default 10 in the markup).
     */
    $.tbpage = function(selector, handler) {
        $(selector).off("click", ".tbpage-item").on("click", ".tbpage-item", function() {
            var pageIndex = $(this).attr("pageIndex");
            var pageSize = $('.tbpage-size option:selected').val();

            // Only invoke the handler when the clicked item is NOT the
            // current (active) page.  hasClass() replaces the previous
            // indexOf("active") > 0 check, which returned 0 (falsy) when
            // "active" was the first/only class and wrongly re-triggered
            // the handler for the current page.
            if (!$(this).parent().hasClass("active")) {
                handler(pageIndex, pageSize);
            }
        });

        $(selector).off("change", ".tbpage-size").on("change", ".tbpage-size", function() {
            // NOTE(review): the <select> element is not expected to carry a
            // "pageIndex" attribute, so this may be undefined -- confirm
            // whether callers should reset to page 0 on page-size changes.
            var pageIndex = $(this).attr("pageIndex");
            var pageSize = $('.tbpage-size option:selected').val();
            handler(pageIndex, pageSize);
        });
    };
})(jQuery);
"pile_set_name": "Github"
} |
/* Base16 Atelier Lakeside Dark - Theme */
/* by Bram de Haan (http://atelierbram.github.io/syntax-highlighting/atelier-schemes/lakeside/) */
/* Original Base16 color scheme by Chris Kempson (https://github.com/chriskempson/base16) */
/* https://github.com/jmblog/color-themes-for-highlightjs */
/* Atelier Lakeside Dark Comment */
.hljs-comment,
.hljs-title {
color: #7195a8;
}
/* Atelier Lakeside Dark Red */
.hljs-variable,
.hljs-attribute,
.hljs-tag,
.hljs-regexp,
.ruby .hljs-constant,
.xml .hljs-tag .hljs-title,
.xml .hljs-pi,
.xml .hljs-doctype,
.html .hljs-doctype,
.css .hljs-id,
.css .hljs-class,
.css .hljs-pseudo {
color: #d22d72;
}
/* Atelier Lakeside Dark Orange */
.hljs-number,
.hljs-preprocessor,
.hljs-pragma,
.hljs-built_in,
.hljs-literal,
.hljs-params,
.hljs-constant {
color: #935c25;
}
/* Atelier Lakeside Dark Yellow */
.hljs-ruby .hljs-class .hljs-title,
.css .hljs-rules .hljs-attribute {
color: #8a8a0f;
}
/* Atelier Lakeside Dark Green */
.hljs-string,
.hljs-value,
.hljs-inheritance,
.hljs-header,
.ruby .hljs-symbol,
.xml .hljs-cdata {
color: #568c3b;
}
/* Atelier Lakeside Dark Aqua */
.css .hljs-hexcolor {
color: #2d8f6f;
}
/* Atelier Lakeside Dark Blue */
.hljs-function,
.python .hljs-decorator,
.python .hljs-title,
.ruby .hljs-function .hljs-title,
.ruby .hljs-title .hljs-keyword,
.perl .hljs-sub,
.javascript .hljs-title,
.coffeescript .hljs-title {
color: #257fad;
}
/* Atelier Lakeside Dark Purple */
.hljs-keyword,
.javascript .hljs-function {
color: #5d5db1;
}
.hljs {
display: block;
background: #1f292e;
color: #7ea2b4;
padding: 0.5em;
}
.coffeescript .javascript,
.javascript .xml,
.tex .hljs-formula,
.xml .javascript,
.xml .vbscript,
.xml .css,
.xml .hljs-cdata {
opacity: 0.5;
}
| {
"pile_set_name": "Github"
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.