repo_name
stringlengths
4
116
path
stringlengths
3
942
size
stringlengths
1
7
content
stringlengths
3
1.05M
license
stringclasses
15 values
niedzielski/pixipedia
app/src/main/java/com/niedzielski/pixipedia/android/activity/ImageFragment.java
1551
package com.niedzielski.pixipedia.android.activity; import android.os.Bundle; import android.support.annotation.Nullable; import android.view.LayoutInflater; import android.view.View; import android.view.ViewGroup; import android.widget.ImageView; import com.niedzielski.pixipedia.android.R; import com.niedzielski.pixipedia.android.util.ImageUtil; import butterknife.InjectView; public class ImageFragment extends DefaultFragment { /*default*/ static final String FRAGMENT_ARG_IMAGE_URL_KEY = "imageUrl"; @InjectView(R.id.image) protected ImageView mImageView; private String mImageUrl; public static ImageFragment newInstance(String imageUrl) { ImageFragment fragment = new ImageFragment(); fragment.setArguments(buildFragmentArgs(imageUrl)); return fragment; } @Override public View onCreateView(LayoutInflater inflater, @Nullable ViewGroup container, @Nullable Bundle savedInstanceState) { View view = super.onCreateView(inflater, container, savedInstanceState); ImageUtil.load(mImageUrl, mImageView); return view; } @Override protected int getLayout() { return R.layout.fragment_page_image; } @Override protected void initFromFragmentArgs() { mImageUrl = getArguments().getString(FRAGMENT_ARG_IMAGE_URL_KEY); } private static Bundle buildFragmentArgs(String imageUrl) { Bundle ret = new Bundle(); ret.putString(FRAGMENT_ARG_IMAGE_URL_KEY, imageUrl); return ret; } }
apache-2.0
m-m-m/code
base/src/main/java/net/sf/mmm/code/base/arg/BaseParameters.java
4951
/* Copyright (c) The m-m-m Team, Licensed under the Apache License, Version 2.0 * http://www.apache.org/licenses/LICENSE-2.0 */ package net.sf.mmm.code.base.arg; import java.io.IOException; import java.lang.reflect.Executable; import java.lang.reflect.Parameter; import java.util.List; import java.util.function.Consumer; import net.sf.mmm.code.api.arg.CodeParameter; import net.sf.mmm.code.api.arg.CodeParameters; import net.sf.mmm.code.api.copy.CodeCopyMapper; import net.sf.mmm.code.api.language.CodeLanguage; import net.sf.mmm.code.api.member.CodeOperation; import net.sf.mmm.code.api.merge.CodeMergeStrategy; import net.sf.mmm.code.base.member.BaseOperation; /** * Base implementation of {@link CodeParameters}. * * @author Joerg Hohwiller (hohwille at users.sourceforge.net) * @since 1.0.0 */ public class BaseParameters extends BaseOperationArgs<CodeParameter> implements CodeParameters { /** * The constructor. * * @param parent the {@link #getParent() parent}. */ public BaseParameters(BaseOperation parent) { super(parent); } /** * The copy-constructor. * * @param template the {@link BaseParameters} to copy. * @param mapper the {@link CodeCopyMapper}. */ public BaseParameters(BaseParameters template, CodeCopyMapper mapper) { super(template, mapper); } @Override protected void doInitialize() { super.doInitialize(); Executable reflectiveObject = getParent().getReflectiveObject(); List<? 
extends CodeParameter> sourceParams = null; int sourceParamsCount = 0; CodeParameters sourceParameters = getSourceCodeObject(); if (sourceParameters != null) { sourceParams = sourceParameters.getDeclared(); sourceParamsCount = sourceParams.size(); } if (reflectiveObject != null) { List<CodeParameter> list = getList(); int i = 0; for (Parameter param : reflectiveObject.getParameters()) { String name = null; CodeParameter baseParameter = null; if ((i < sourceParamsCount) && (sourceParams != null)) { baseParameter = sourceParams.get(i++); name = baseParameter.getName(); } if (name == null) { name = param.getName(); } BaseParameter parameter = new BaseParameter(this, name, param, baseParameter); list.add(parameter); } } } @Override public CodeParameter getDeclared(String name) { initialize(); return getByName(name); } @Override public CodeParameter add(String name) { BaseParameter parameter = new BaseParameter(this, name); add(parameter); return parameter; } @Override public CodeParameters getSourceCodeObject() { CodeOperation sourceOperation = getParent().getSourceCodeObject(); if (sourceOperation != null) { return sourceOperation.getParameters(); } return null; } @Override protected void rename(CodeParameter child, String oldName, String newName, Consumer<String> renamer) { super.rename(child, oldName, newName, renamer); } @Override public CodeParameters merge(CodeParameters o, CodeMergeStrategy strategy) { if (strategy == CodeMergeStrategy.KEEP) { return this; } BaseParameters other = (BaseParameters) o; List<? extends CodeParameter> otherParameters = other.getDeclared(); if (strategy == CodeMergeStrategy.OVERRIDE) { clear(); for (CodeParameter otherParameter : otherParameters) { CodeParameter copyParameter = doCopyNode(otherParameter, this); add(copyParameter); } } else { List<? 
extends CodeParameter> myParameters = getDeclared(); int i = 0; int len = myParameters.size(); assert (len == otherParameters.size()); for (CodeParameter otherParameter : otherParameters) { CodeParameter myParameter = null; if (i < len) { myParameter = myParameters.get(i++); // merging via index as by name could cause errors on mismatch } if (myParameter == null) { CodeParameter copyParameter = doCopyNode(otherParameter, this); add(copyParameter); } else { myParameter.merge(otherParameter, strategy); } } } return this; } @Override public BaseParameters copy() { return copy(getDefaultCopyMapper()); } @Override public BaseParameters copy(CodeCopyMapper mapper) { return new BaseParameters(this, mapper); } @Override protected void doWrite(Appendable sink, String newline, String defaultIndent, String currentIndent, CodeLanguage language) throws IOException { writeReference(sink, newline, true); } void writeReference(Appendable sink, String newline, boolean declaration) throws IOException { String prefix = ""; for (CodeParameter parameter : getList()) { sink.append(prefix); parameter.write(sink, newline, null, null); prefix = ", "; } } }
apache-2.0
orionzhou/rgeneclust
hmm.stat.pl
1852
#!/usr/bin/perl -w # # POD documentation #--------------------------------------------------------------------------- =pod BEGIN =head1 NAME hmm.stat.pl - =head1 SYNOPSIS hmm.stat.pl [-help] [-i input-file] [-o output-file] Options: -h (--help) brief help message -i (--in) input file (HMM) -o (--out) output file (Tbl) =cut #### END of POD documentation. #--------------------------------------------------------------------------- use strict; use Getopt::Long; use Pod::Usage; use FindBin; use lib "$FindBin::Bin"; use Common; use Data::Dumper; use File::Path qw/make_path remove_tree/; use File::Basename; use List::Util qw/min max sum/; my ($fi, $fo) = ('') x 2; my $help_flag; #--------------------------------- MAIN -----------------------------------# GetOptions( "help|h" => \$help_flag, "in|i=s" => \$fi, "out|o=s" => \$fo, ) or pod2usage(2); pod2usage(1) if $help_flag; pod2usage(2) if !$fi || !$fo; my $fho; if ($fo eq '' || $fo eq "stdout" || $fo eq "-") { $fho = \*STDOUT; } else { open ($fho, ">$fo") || die "cannot write $fo\n"; } sub check_gap { my ($fi) = @_; my $ai = Bio::AlignIO->new(-file=>"<$fi"); my $gap = 0; while(my $aln = $ai->next_aln()) { for my $seq ($aln->each_seq()) { if($seq->seq =~ /[\-\.]/) { $gap = 1; last; } } } return $gap; } print $fho join("\t", qw/id acc accl nseq len gap conseq/)."\n"; my $lines = runCmd("hmmstat $fi", 2); for (@$lines) { /(^\#)|(^\s*$)/ && next; my @ps = split " "; my ($id, $accl, $nseq, $len) = @ps[1,2,3,5]; $accl = "" if $accl eq "-"; my $acc = $accl; $acc =~ s/\.\d+$//; # my $seqlines = runCmd("hmmemit -c $fi", 2); # my $seq = join("", @$seqlines[1..@$seqlines-1]); print $fho join("\t", $id, $acc, $accl, $nseq, $len, '', '')."\n"; } close $fho;
apache-2.0
wuzongxian926/video_list
camxj.md
3928
# <div align = center>¡¶³¬°®ÃÀС½ã¡·È«¼¯</div> #### <div align = center>×îÐĄ̂Íå×ÛÒÕ £º[www.twzy.tw](http://www.twzy.tw) | ̨Íå×ÛÒÕÏÂÔØ £º[xz.twzy.tw](http://xz.twzy.tw) | ̨Íå×ÛÒÕÈ«¼¯ £º[qj.twzy.tw](http://qj.twzy.tw)</dir> ##### <div align = center>΢ÐŹ«ÖںŠ£ºdaleloogn | ±¦µº¹º¹º¹º Îâ×ÚÏÜ·ÛË¿|̨×Ûtwzy.tw-QQȺ£º[208096042](http://shang.qq.com/wpa/qunwpa?idkey=321d0a9d5570812413860c1a5c653e05859452e0629f65b7353ef70f5a5d68e7) ÊղظßÇåÔ­ÅÌ×ÊÔ´ÇëÁªÏµQQ £º[12347450](http://wpa.qq.com/msgrd?v=3&uin=12347450&site=qq&menu=yes)</dir> <div align = center><img src="https://github.com/wuzongxian926/video_list/blob/master/GIT.jpg?raw=true" width="100%" height="100%" alt="±¦µº¹º¹º¹ºÆßÔÂîÒ»Ý" /></div> ÈÕÆÚ|Ö÷Ìâ|À´±ö|ÔÚÏß|ÏÂÔØ ----|----|----|----|---- |2016/10/23|ʱÉÐÖ÷ÌâÈÕ Ëµ×ß¾Í×ßµÄÂÃÐÐ|·¼Îâù,ÁõÈݼÎ,°¬ÃÀ|[µã»÷²¥·Å](http://www.acfun.tv/v/ac3204337)|[ÍøÅÌÏÂÔØ](http://xz.twzy.tw) |2016/10/22|ÌØ±ðÆó»® Ê×¶ûʱÉÐÖ®ÂÃ|ÏĺÍÎõ,°¬ÃÀ|[²¥·Å](http://www.acfun.tv/v/ac3204339)|[ÍøÅÌÏÂÔØ](http://xz.twzy.tw) |2016/10/16|“Q¼¾±£ðBÕ¬ÃÀÈÝÉñÆ÷|À´±ö|[µã»÷²¥·Å](http://www.acfun.tv/v/ac3185943)|[ÍøÅÌÏÂÔØ](http://xz.twzy.tw) |2016/10/15|È«Ãñ¶¼ÔÚ¯‚µÄÊÖß[•rÉÐ|À´±ö|[µã»÷²¥·Å](http://www.acfun.tv/v/ac3185937)|[ÍøÅÌÏÂÔØ](http://xz.twzy.tw) |2016/10/01|116 ínÁ÷starÁ÷ÐЕrÉÐ|À´±ö|[µã»÷²¥·Å](http://www.bilibili.com/video/av6525232/)|[ÍøÅÌÏÂÔØ](http://xz.twzy.tw) |2016/9/24|115¼¯ ÄÐÉñðB³ÉÐg|À´±ö|[µã»÷²¥·Å](http://www.bilibili.com/video/av6420791/)|[ÍøÅÌÏÂÔØ](http://xz.twzy.tw) |2016/9/10|114¼¯ ÄÏͶºÃºÃÍæÌØ„eÆó„|À´±ö|[µã»÷²¥·Å](http://www.bilibili.com/video/av6342690/)|[ÍøÅÌÏÂÔØ](http://xz.twzy.tw) |2016/9/04|113¼¯ Ò»Æðí®”•rÉÐÉú»îºÃÒ°ÈË|À´±ö|[µã»÷²¥·Å](http://www.bilibili.com/video/av6327288/)|[ÍøÅÌÏÂÔØ](http://xz.twzy.tw) |2016/9/03|²»ÀË·Ñ»úƱǮ Âú«±Ø°Ü¹¥ÂÔ|Sam,ZUZU,С¿Ù,³ÂÐÀÐÀ,Ã×Á£Q|[µã»÷²¥·Å](http://www.bilibili.com/video/av6237272/)|[ÍøÅÌÏÂÔØ](http://xz.twzy.tw) |2016/8/28|111¼¯ ÂÃín±Ø”¡¹¥ÂÔ(ÏÂ)|À´±ö|[µã»÷²¥·Å](http://www.bilibili.com/video/av6174561/)|[ÍøÅÌÏÂÔØ](http://xz.twzy.tw) 
|2016/8/27|ר¼ÒÈÃÄã¿ìÀÖËÜ|СÌðÌð,Peter,ÀîÃ÷´¨,À³Ç,À´»¨,ºè½Ü|[µã»÷²¥·Å](http://www.bilibili.com/video/av6162883/)|[ÍøÅÌÏÂÔØ](http://xz.twzy.tw) |2016/8/21|Á÷À˶¯ÎïÓöµ½°®|Tim,ÁÎÕþ»Ô,·¼ÎÐÀ,Ò˰Ø,˹³½,»ÆÝ¶¾û,ÕÅ׿³Ï,ÓñÍÃ|[µã»÷²¥·Å](http://www.bilibili.com/video/av6019561/)|[ÍøÅÌÏÂÔØ](http://xz.twzy.tw) |2016/8/20|Á÷À˄ÓÎïÓöµ½Û|Tim,ÁÎÕþ»Ô,·¼ÎÐÀ,Ò˰Ø,˹³½,»ÆÝ¶¾û,ÕÅ׿³Ï,ÓñÍÃ|[µã»÷²¥·Å](http://www.aixifan.com/v/ac3041961)|[ÍøÅÌÏÂÔØ](http://xz.twzy.tw) |2016/8/14|107¼¯ ¾W·•rÉÐÈ˚âÍõ|À´±ö|[µã»÷²¥·Å](http://www.bilibili.com/video/av5926269/)|[ÍøÅÌÏÂÔØ](http://xz.twzy.tw) |2016/8/13|•rÉоÞÐÇÈÕLULU ÍêÕû°æ|À´±ö|[µã»÷²¥·Å](http://www.bilibili.com/video/av5801248/)|[ÍøÅÌÏÂÔØ](http://xz.twzy.tw) |2016/8/07|»éÀñºÃºÃÍæ|Apple,Ann,Nikki|[µã»÷²¥·Å](http://www.bilibili.com/video/av5705934/)|[ÍøÅÌÏÂÔØ](http://xz.twzy.tw) |2016/8/06|½ñÏÄÖÆ°Ôº£ž©•rÉд©´î|À´±ö|[µã»÷²¥·Å](http://www.bilibili.com/video/av5686784/)|[ÍøÅÌÏÂÔØ](http://xz.twzy.tw) |2016/7/31|ÄæýgÃÀÈ˱£ðBÐg|À´±ö|[µã»÷²¥·Å](http://www.acfun.tv/v/ac2948847)|[ÍøÅÌÏÂÔØ](http://xz.twzy.tw) |2016/7/30|¸æ±ð¾Ã×øÈâÈâÅ®|ׯ±ÌÓñ,LULU,СÌðÌð,ÓñÍÃ,MEIMEI|[µã»÷²¥·Å](http://www.acfun.tv/v/ac2946198)|[ÍøÅÌÏÂÔØ](http://xz.twzy.tw) |2016/7/23|­h±£•rÉÐ ÛÃÀÒ²ÒªÛµØÇò|À´±ö|[µã»÷²¥·Å](http://www.acfun.tv/v/ac2928686)|[ÍøÅÌÏÂÔØ](http://xz.twzy.tw) |2016/7/17|•rÉÐshow room|À´±ö|[µã»÷²¥·Å](http://www.acfun.tv/v/ac2903213)|[ÍøÅÌÏÂÔØ](http://xz.twzy.tw) |2016/7/16|´©´îiconÏÄÈÕʱÉÐBest Buy|¹Ø,ÏĺÍÎõ,¿­Ï£,ÌÆÝÚ,¼òâýµÙ|[µã»÷²¥·Å](http://www.acfun.tv/v/ac2901044)|[ÍøÅÌÏÂÔØ](http://xz.twzy.tw) |2016/7/09|ʱÉоÞÐÇÈÕÎâ˼ÏÍ|Îâ˼ÏÍ,ÁÎÔʽÜ,ÏĺÍÎõ,Ò˰Ø|[µã»÷²¥·Å](http://www.acfun.tv/v/ac2881816)|[ÍøÅÌÏÂÔØ](http://xz.twzy.tw) |2016/7/03|ÏÄÈÕʱÉÐÄãÒªÖªµÀ|¹Ø,³ÂÐÀÐÀ,ÓñÍÃ,´ó¾Ã±£ÂéÀæ×Ó,Ò˰Ø,ÖÒºÀ|[µã»÷²¥·Å](http://www.acfun.tv/v/ac2864655)|[ÍøÅÌÏÂÔØ](http://xz.twzy.tw) |2016/7/02|·Â×±¸ßÊÖPKÈü »á»­±ðÈ˲ÅÀ÷º¦|M4(ÁÖÀÖΰ,¼Íåû,¹ùæ¼Æí,Ã×ÑÇÈô),ÉÈ,²¨Æ¦|[µã»÷²¥·Å](http://www.acfun.tv/v/ac2881102)|[ÍøÅÌÏÂÔØ](http://xz.twzy.tw)
apache-2.0
kierarad/gocd
server/src/main/webapp/WEB-INF/rails/webpack/views/pages/config_repos/config_repo_view_model.ts
4560
/* * Copyright 2019 ThoughtWorks, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import {ApiResult} from "helpers/api_request_builder"; import m from "mithril"; import Stream from "mithril/stream"; import {AbstractObjCache, ObjectCache, rejectAsString} from "models/base/cache"; import {ConfigReposCRUD} from "models/config_repos/config_repos_crud"; import {DefinedStructures} from "models/config_repos/defined_structures"; import {ConfigRepo} from "models/config_repos/types"; import {EventAware} from "models/mixins/event_aware"; import {DeleteConfirmModal} from "views/components/modal/delete_confirm_modal"; import {EditConfigRepoModal} from "views/pages/config_repos/modals"; import {FlashContainer, RequiresPluginInfos, SaveOperation} from "views/pages/page_operations"; interface PageResources extends SaveOperation, RequiresPluginInfos, FlashContainer {} class CRResultCache extends AbstractObjCache<DefinedStructures> { private repoId: string; private etag: Stream<string> = Stream(); constructor(repoId: string) { super(); this.repoId = repoId; } doFetch(resolve: (data: DefinedStructures) => void, reject: (reason: string) => void) { DefinedStructures.fetch(this.repoId, this.etag()).then((result) => { if (304 === result.getStatusCode()) { resolve(this.contents()); // no change return; } if (result.getEtag()) { this.etag(result.getEtag()!); } result.do((resp) => { resolve(DefinedStructures.fromJSON(JSON.parse(resp.body))); }, (error) => { reject(error.message); }); 
}).catch(rejectAsString(reject)); } empty() { // don't dump contents, just force a fresh set of data this.etag = Stream(); } } // a subset of Event interface Propagable { stopPropagation: () => void; } export class ConfigRepoVM { repo: ConfigRepo; results: ObjectCache<DefinedStructures>; reparseRepo: (e: Propagable) => Promise<void>; showEditModal: (e: Propagable) => void; showDeleteModal: (e: Propagable) => void; constructor(repo: ConfigRepo, page: PageResources, results?: ObjectCache<DefinedStructures>) { const cache = results || new CRResultCache(repo.id()!); Object.assign(ConfigRepoVM.prototype, EventAware.prototype); EventAware.call(this); this.repo = repo; this.results = cache; this.on("expand", () => !cache.failed() && cache.prime(m.redraw)); this.on("refresh", () => (cache.invalidate(), cache.prime(m.redraw))); this.reparseRepo = (e) => { e.stopPropagation(); page.flash.clear(); const repoId = this.repo.id()!; return ConfigReposCRUD.triggerUpdate(repoId).then((result: ApiResult<any>) => { repo.materialUpdateInProgress(true); result.do(() => { page.flash.success(`An update was scheduled for '${repoId}' config repository.`); this.notify("refresh"); }, (err) => { page.flash.alert(`Unable to schedule an update for '${repoId}' config repository. 
${err.message}`); }); }); }; this.showEditModal = (e) => { e.stopPropagation(); page.flash.clear(); new EditConfigRepoModal(this.repo.id()!, page.onSuccessfulSave, page.onError, page.pluginInfos).render(); }; this.showDeleteModal = (e) => { e.stopPropagation(); page.flash.clear(); const message = ["Are you sure you want to delete the config repository ", m("strong", this.repo.id()), "?"]; const modal = new DeleteConfirmModal(message, () => { ConfigReposCRUD.delete(this.repo).then((resp) => { resp.do( (resp) => page.onSuccessfulSave(resp.body.message), (err) => page.onError(err.message)); }).then(modal.close.bind(modal)); }); modal.render(); }; } } // tslint:disable-next-line export interface ConfigRepoVM extends EventAware {} export interface CRVMAware { vm: ConfigRepoVM; }
apache-2.0
NLPIE/BioMedICUS
doc/dokka/biomedicus-core/edu.umn.biomedicus.measures/-number-context-writer/-init-.html
1222
<HTML> <HEAD> <meta charset="UTF-8"> <title>NumberContextWriter.<init> - biomedicus-core</title> <link rel="stylesheet" href="../../../style.css"> </HEAD> <BODY> <a href="../../index.html">biomedicus-core</a>&nbsp;/&nbsp;<a href="../index.html">edu.umn.biomedicus.measures</a>&nbsp;/&nbsp;<a href="index.html">NumberContextWriter</a>&nbsp;/&nbsp;<a href="./-init-.html">&lt;init&gt;</a><br/> <br/> <h1>&lt;init&gt;</h1> <a name="edu.umn.biomedicus.measures.NumberContextWriter$&lt;init&gt;(java.nio.file.Path, kotlin.Int)"></a> <code><span class="identifier">NumberContextWriter</span><span class="symbol">(</span><span class="identifier" id="edu.umn.biomedicus.measures.NumberContextWriter$<init>(java.nio.file.Path, kotlin.Int)/outputDirectory">outputDirectory</span><span class="symbol">:</span>&nbsp;<span class="identifier">Path</span><span class="symbol">, </span><span class="identifier" id="edu.umn.biomedicus.measures.NumberContextWriter$<init>(java.nio.file.Path, kotlin.Int)/contextSize">contextSize</span><span class="symbol">:</span>&nbsp;<a href="https://kotlinlang.org/api/latest/jvm/stdlib/kotlin/-int/index.html"><span class="identifier">Int</span></a><span class="symbol">)</span></code> </BODY> </HTML>
apache-2.0
dcarda/aba.route.validator
target13/site/clover/com/cardatechnologies/utils/validators/abaroutevalidator/Test_AbaRouteValidator_03_testAbaNumberCheck_3354_bad_ycb.html
10986
<!DOCTYPE html> <html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en"> <head> <meta http-equiv="Content-Type" content="text/html; charset=UTF-8"/> <link rel="SHORTCUT ICON" href="../../../../../img/clover.ico" /> <link rel="stylesheet" href="../../../../../aui/css/aui.min.css" media="all"/> <link rel="stylesheet" href="../../../../../aui/css/aui-experimental.min.css" media="all"/> <!--[if IE 9]><link rel="stylesheet" href="../../../../../aui/css/aui-ie9.min.css" media="all"/><![endif]--> <style type="text/css" media="all"> @import url('../../../../../style.css'); @import url('../../../../../tree.css'); </style> <script src="../../../../../jquery-1.8.3.min.js" type="text/javascript"></script> <script src="../../../../../aui/js/aui.min.js" type="text/javascript"></script> <script src="../../../../../aui/js/aui-experimental.min.js" type="text/javascript"></script> <script src="../../../../../aui/js/aui-soy.min.js" type="text/javascript"></script> <script src="../../../../../package-nodes-tree.js" type="text/javascript"></script> <script src="../../../../../clover-tree.js" type="text/javascript"></script> <script src="../../../../../clover.js" type="text/javascript"></script> <script src="../../../../../clover-descriptions.js" type="text/javascript"></script> <script src="../../../../../cloud.js" type="text/javascript"></script> <title>ABA Route Transit Number Validator 1.0.1-SNAPSHOT</title> </head> <body> <div id="page"> <header id="header" role="banner"> <nav class="aui-header aui-dropdown2-trigger-group" role="navigation"> <div class="aui-header-inner"> <div class="aui-header-primary"> <h1 id="logo" class="aui-header-logo aui-header-logo-clover"> <a href="http://openclover.org" title="Visit OpenClover home page"><span class="aui-header-logo-device">OpenClover</span></a> </h1> </div> <div class="aui-header-secondary"> <ul class="aui-nav"> <li id="system-help-menu"> <a class="aui-nav-link" title="Open online documentation" target="_blank" 
href="http://openclover.org/documentation"> <span class="aui-icon aui-icon-small aui-iconfont-help">&#160;Help</span> </a> </li> </ul> </div> </div> </nav> </header> <div class="aui-page-panel"> <div class="aui-page-panel-inner"> <div class="aui-page-panel-nav aui-page-panel-nav-clover"> <div class="aui-page-header-inner" style="margin-bottom: 20px;"> <div class="aui-page-header-image"> <a href="http://cardatechnologies.com" target="_top"> <div class="aui-avatar aui-avatar-large aui-avatar-project"> <div class="aui-avatar-inner"> <img src="../../../../../img/clover_logo_large.png" alt="Clover icon"/> </div> </div> </a> </div> <div class="aui-page-header-main" > <h1> <a href="http://cardatechnologies.com" target="_top"> ABA Route Transit Number Validator 1.0.1-SNAPSHOT </a> </h1> </div> </div> <nav class="aui-navgroup aui-navgroup-vertical"> <div class="aui-navgroup-inner"> <ul class="aui-nav"> <li class=""> <a href="../../../../../dashboard.html">Project overview</a> </li> </ul> <div class="aui-nav-heading packages-nav-heading"> <strong>Packages</strong> </div> <div class="aui-nav project-packages"> <form method="get" action="#" class="aui package-filter-container"> <input type="text" autocomplete="off" class="package-filter text" placeholder="Type to filter packages..." name="package-filter" id="package-filter" title="Start typing package name (or part of the name) to search through the tree. 
Use arrow keys and the Enter key to navigate."/> </form> <p class="package-filter-no-results-message hidden"> <small>No results found.</small> </p> <div class="packages-tree-wrapper" data-root-relative="../../../../../" data-package-name="com.cardatechnologies.utils.validators.abaroutevalidator"> <div class="packages-tree-container"></div> <div class="clover-packages-lozenges"></div> </div> </div> </div> </nav> </div> <section class="aui-page-panel-content"> <div class="aui-page-panel-content-clover"> <div class="aui-page-header-main"><ol class="aui-nav aui-nav-breadcrumbs"> <li><a href="../../../../../dashboard.html"> Project Clover database Sat Aug 7 2021 12:29:33 MDT</a></li> <li><a href="test-pkg-summary.html">Package com.cardatechnologies.utils.validators.abaroutevalidator</a></li> <li><a href="test-Test_AbaRouteValidator_03.html">Class Test_AbaRouteValidator_03</a></li> </ol></div> <h1 class="aui-h2-clover"> Test testAbaNumberCheck_3354_bad </h1> <table class="aui"> <thead> <tr> <th>Test</th> <th><label title="The test result. 
Either a Pass, Fail or Error.">Status</label></th> <th><label title="When the test execution was started">Start time</label></th> <th><label title="The total time in seconds taken to run this test.">Time (seconds)</label></th> <th><label title="A failure or error message if the test is not successful.">Message</label></th> </tr> </thead> <tbody> <tr> <td> <a href="../../../../../com/cardatechnologies/utils/validators/abaroutevalidator/Test_AbaRouteValidator_03.html?line=20561#src-20561" >testAbaNumberCheck_3354_bad</a> </td> <td> <span class="sortValue">1</span><span class="aui-lozenge aui-lozenge-success">PASS</span> </td> <td> 7 Aug 12:33:00 </td> <td> 0.001 </td> <td> <div></div> <div class="errorMessage"></div> </td> </tr> </tbody> </table> <div>&#160;</div> <table class="aui aui-table-sortable"> <thead> <tr> <th style="white-space:nowrap;"><label title="A class that was directly hit by this test.">Target Class</label></th> <th colspan="4"><label title="The percentage of coverage contributed by each single test.">Coverage contributed by</label> testAbaNumberCheck_3354_bad</th> </tr> </thead> <tbody> <tr> <td> <span class="sortValue">com.cardatechnologies.utils.validators.abaroutevalidator.exceptions.AbaRouteValidationException</span> &#160;&#160;<a href="../../../../../com/cardatechnologies/utils/validators/abaroutevalidator/exceptions/AbaRouteValidationException.html?id=44507#AbaRouteValidationException" title="AbaRouteValidationException" name="sl-43">com.cardatechnologies.utils.validators.abaroutevalidator.exceptions.AbaRouteValidationException</a> </td> <td> <span class="sortValue">0.5714286</span>57.1% </td> <td class="align-middle" style="width: 100%" colspan="3"> <div> <div title="57.1% Covered" style="min-width:40px;" class="barNegative contribBarNegative contribBarNegative"><div class="barPositive contribBarPositive contribBarPositive" style="width:57.1%"></div></div></div> </td> </tr> <tr> <td> <span 
class="sortValue">com.cardatechnologies.utils.validators.abaroutevalidator.ErrorCodes</span> &#160;&#160;<a href="../../../../../com/cardatechnologies/utils/validators/abaroutevalidator/ErrorCodes.html?id=44507#ErrorCodes" title="ErrorCodes" name="sl-42">com.cardatechnologies.utils.validators.abaroutevalidator.ErrorCodes</a> </td> <td> <span class="sortValue">0.5714286</span>57.1% </td> <td class="align-middle" style="width: 100%" colspan="3"> <div> <div title="57.1% Covered" style="min-width:40px;" class="barNegative contribBarNegative contribBarNegative"><div class="barPositive contribBarPositive contribBarPositive" style="width:57.1%"></div></div></div> </td> </tr> <tr> <td> <span class="sortValue">com.cardatechnologies.utils.validators.abaroutevalidator.AbaRouteValidator</span> &#160;&#160;<a href="../../../../../com/cardatechnologies/utils/validators/abaroutevalidator/AbaRouteValidator.html?id=44507#AbaRouteValidator" title="AbaRouteValidator" name="sl-47">com.cardatechnologies.utils.validators.abaroutevalidator.AbaRouteValidator</a> </td> <td> <span class="sortValue">0.29411766</span>29.4% </td> <td class="align-middle" style="width: 100%" colspan="3"> <div> <div title="29.4% Covered" style="min-width:40px;" class="barNegative contribBarNegative contribBarNegative"><div class="barPositive contribBarPositive contribBarPositive" style="width:29.4%"></div></div></div> </td> </tr> </tbody> </table> </div> <!-- class="aui-page-panel-content-clover" --> <footer id="footer" role="contentinfo"> <section class="footer-body"> <ul> <li> Report generated by <a target="_new" href="http://openclover.org">OpenClover</a> v 4.4.1 on Sat Aug 7 2021 12:49:26 MDT using coverage data from Sat Aug 7 2021 12:47:23 MDT. </li> </ul> <ul> <li>OpenClover is free and open-source software. 
</li> </ul> </section> </footer> </section> <!-- class="aui-page-panel-content" --> </div> <!-- class="aui-page-panel-inner" --> </div> <!-- class="aui-page-panel" --> </div> <!-- id="page" --> </body> </html>
apache-2.0
mdoering/backbone
life/Plantae/Magnoliophyta/Magnoliopsida/Laurales/Lauraceae/Litsea/Litsea kerrii/README.md
172
# Litsea kerrii Kosterm. SPECIES #### Status ACCEPTED #### According to International Plant Names Index #### Published in null #### Original name null ### Remarks null
apache-2.0
leleuj/cas
core/cas-server-core-validation/src/test/java/org/apereo/cas/validation/AuthenticationPolicyAwareServiceTicketValidationAuthorizerTests.java
11612
package org.apereo.cas.validation; import org.apereo.cas.TestOneTimePasswordAuthenticationHandler; import org.apereo.cas.authentication.AcceptUsersAuthenticationHandler; import org.apereo.cas.authentication.AuthenticationHandler; import org.apereo.cas.authentication.AuthenticationPolicy; import org.apereo.cas.authentication.CoreAuthenticationTestUtils; import org.apereo.cas.authentication.Credential; import org.apereo.cas.authentication.DefaultAuthenticationEventExecutionPlan; import org.apereo.cas.authentication.credential.OneTimePasswordCredential; import org.apereo.cas.authentication.credential.UsernamePasswordCredential; import org.apereo.cas.authentication.handler.support.SimpleTestUsernamePasswordAuthenticationHandler; import org.apereo.cas.authentication.policy.AllAuthenticationHandlersSucceededAuthenticationPolicy; import org.apereo.cas.authentication.policy.AllCredentialsValidatedAuthenticationPolicy; import org.apereo.cas.authentication.policy.AtLeastOneCredentialValidatedAuthenticationPolicy; import org.apereo.cas.authentication.policy.RequiredHandlerAuthenticationPolicy; import org.apereo.cas.config.CasCoreAuthenticationConfiguration; import org.apereo.cas.config.CasCoreAuthenticationHandlersConfiguration; import org.apereo.cas.config.CasCoreAuthenticationMetadataConfiguration; import org.apereo.cas.config.CasCoreAuthenticationPolicyConfiguration; import org.apereo.cas.config.CasCoreAuthenticationPrincipalConfiguration; import org.apereo.cas.config.CasCoreAuthenticationSupportConfiguration; import org.apereo.cas.config.CasCoreConfiguration; import org.apereo.cas.config.CasCoreHttpConfiguration; import org.apereo.cas.config.CasCoreServicesAuthenticationConfiguration; import org.apereo.cas.config.CasCoreServicesConfiguration; import org.apereo.cas.config.CasCoreTicketCatalogConfiguration; import org.apereo.cas.config.CasCoreTicketIdGeneratorsConfiguration; import org.apereo.cas.config.CasCoreTicketsConfiguration; import 
org.apereo.cas.config.CasCoreUtilConfiguration; import org.apereo.cas.config.CasCoreWebConfiguration; import org.apereo.cas.config.CasPersonDirectoryTestConfiguration; import org.apereo.cas.config.CasRegisteredServicesTestConfiguration; import org.apereo.cas.config.support.CasWebApplicationServiceFactoryConfiguration; import org.apereo.cas.logout.config.CasCoreLogoutConfiguration; import org.apereo.cas.services.ServicesManager; import lombok.val; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.function.Executable; import org.springframework.beans.factory.annotation.Autowired; import org.springframework.beans.factory.annotation.Qualifier; import org.springframework.boot.autoconfigure.mail.MailSenderAutoConfiguration; import org.springframework.boot.test.context.SpringBootTest; import org.springframework.cloud.autoconfigure.RefreshAutoConfiguration; import org.springframework.context.ConfigurableApplicationContext; import org.springframework.mock.web.MockHttpServletRequest; import java.util.List; import java.util.Map; import java.util.stream.Collectors; import static org.junit.jupiter.api.Assertions.*; import static org.mockito.Mockito.*; /** * This is {@link AuthenticationPolicyAwareServiceTicketValidationAuthorizerTests}. 
* * @author Misagh Moayyed * @since 6.2.0 */ @SpringBootTest(classes = { RefreshAutoConfiguration.class, CasPersonDirectoryTestConfiguration.class, CasRegisteredServicesTestConfiguration.class, CasCoreAuthenticationConfiguration.class, CasCoreServicesAuthenticationConfiguration.class, CasCoreAuthenticationPrincipalConfiguration.class, CasCoreAuthenticationPolicyConfiguration.class, CasCoreAuthenticationMetadataConfiguration.class, CasCoreAuthenticationSupportConfiguration.class, CasCoreAuthenticationHandlersConfiguration.class, CasCoreWebConfiguration.class, CasCoreHttpConfiguration.class, CasCoreUtilConfiguration.class, CasCoreTicketsConfiguration.class, CasCoreTicketCatalogConfiguration.class, CasCoreTicketIdGeneratorsConfiguration.class, CasCoreLogoutConfiguration.class, CasCoreConfiguration.class, CasCoreServicesConfiguration.class, CasWebApplicationServiceFactoryConfiguration.class, MailSenderAutoConfiguration.class }) public class AuthenticationPolicyAwareServiceTicketValidationAuthorizerTests { @Autowired @Qualifier("servicesManager") private ServicesManager servicesManager; @Autowired private ConfigurableApplicationContext applicationContext; private static Assertion getAssertion(final Map<Credential, ? 
extends AuthenticationHandler> handlers) { val assertion = mock(Assertion.class); val principal = CoreAuthenticationTestUtils.getPrincipal("casuser"); val authentication = CoreAuthenticationTestUtils.getAuthenticationBuilder(principal, handlers, Map.of(AuthenticationHandler.SUCCESSFUL_AUTHENTICATION_HANDLERS, handlers.values().stream().map(AuthenticationHandler::getName).collect(Collectors.toList()))).build(); when(assertion.getPrimaryAuthentication()).thenReturn(authentication); return assertion; } private static SimpleTestUsernamePasswordAuthenticationHandler getSimpleTestAuthenticationHandler() { return new SimpleTestUsernamePasswordAuthenticationHandler(); } private static AcceptUsersAuthenticationHandler getAcceptUsersAuthenticationHandler() { return new AcceptUsersAuthenticationHandler(Map.of("casuser", "Mellon")); } private static OneTimePasswordCredential getOtpCredential() { return new OneTimePasswordCredential("test", "123456789"); } private static TestOneTimePasswordAuthenticationHandler getTestOtpAuthenticationHandler() { return new TestOneTimePasswordAuthenticationHandler(Map.of("casuser", "123456789")); } @Test public void verifyAllAuthenticationHandlersSucceededAuthenticationPolicy() { val handlers = List.of(getTestOtpAuthenticationHandler(), getAcceptUsersAuthenticationHandler(), getSimpleTestAuthenticationHandler()); val service = CoreAuthenticationTestUtils.getService("https://example.com/high/"); val authz = getAuthorizer(new AllAuthenticationHandlersSucceededAuthenticationPolicy(), handlers); val map = (Map) Map.of( new UsernamePasswordCredential(), getAcceptUsersAuthenticationHandler(), getOtpCredential(), getTestOtpAuthenticationHandler()); val assertion = getAssertion(map); assertDoesNotThrow(new Executable() { @Override public void execute() { authz.authorize(new MockHttpServletRequest(), service, assertion); } }); } @Test public void verifyAllCredentialsValidatedAuthenticationPolicy() { val handlers = 
List.of(getTestOtpAuthenticationHandler(), getAcceptUsersAuthenticationHandler(), getSimpleTestAuthenticationHandler()); val service = CoreAuthenticationTestUtils.getService("https://example.com/high/"); val authz = getAuthorizer(new AllCredentialsValidatedAuthenticationPolicy(), handlers); val map = (Map) Map.of( new UsernamePasswordCredential(), getAcceptUsersAuthenticationHandler(), getOtpCredential(), getTestOtpAuthenticationHandler()); val assertion = getAssertion(map); assertDoesNotThrow(new Executable() { @Override public void execute() { authz.authorize(new MockHttpServletRequest(), service, assertion); } }); } @Test public void verifyRequiredHandlerAuthenticationPolicy() { val handler = getAcceptUsersAuthenticationHandler(); val handlers = List.of(getTestOtpAuthenticationHandler(), handler, getSimpleTestAuthenticationHandler()); val service = CoreAuthenticationTestUtils.getService("https://example.com/high/"); val authz = getAuthorizer(new RequiredHandlerAuthenticationPolicy(handler.getName()), handlers); val map = (Map) Map.of( new UsernamePasswordCredential(), handler, getOtpCredential(), getTestOtpAuthenticationHandler()); val assertion = getAssertion(map); assertDoesNotThrow(new Executable() { @Override public void execute() { authz.authorize(new MockHttpServletRequest(), service, assertion); } }); } @Test public void verifyRequiredHandlerAuthenticationPolicyTryAll() { val handler = getAcceptUsersAuthenticationHandler(); val handlers = List.of(getTestOtpAuthenticationHandler(), handler, getSimpleTestAuthenticationHandler()); val service = CoreAuthenticationTestUtils.getService("https://example.com/high/"); val authz = getAuthorizer(new RequiredHandlerAuthenticationPolicy(handler.getName(), true), handlers); val map = (Map) Map.of( new UsernamePasswordCredential(), handler, getOtpCredential(), getTestOtpAuthenticationHandler()); val assertion = getAssertion(map); assertDoesNotThrow(new Executable() { @Override public void execute() { authz.authorize(new 
MockHttpServletRequest(), service, assertion); } }); } @Test public void verifyOperationWithHandlersAndAtLeastOneCredential() { val handlers = List.of(getTestOtpAuthenticationHandler(), getAcceptUsersAuthenticationHandler(), getSimpleTestAuthenticationHandler()); val service = CoreAuthenticationTestUtils.getService("https://example.com/high/"); val authz = getAuthorizer(new AtLeastOneCredentialValidatedAuthenticationPolicy(), handlers); val map = (Map) Map.of( new UsernamePasswordCredential(), getAcceptUsersAuthenticationHandler(), getOtpCredential(), getTestOtpAuthenticationHandler()); val assertion = getAssertion(map); assertDoesNotThrow(new Executable() { @Override public void execute() { authz.authorize(new MockHttpServletRequest(), service, assertion); } }); } @Test public void verifyOperationWithHandlersAndAtLeastOneCredentialMustTryAll() { val handlers = List.of(getTestOtpAuthenticationHandler(), getAcceptUsersAuthenticationHandler(), getSimpleTestAuthenticationHandler()); val service = CoreAuthenticationTestUtils.getService("https://example.com/high/"); val authz = getAuthorizer(new AtLeastOneCredentialValidatedAuthenticationPolicy(true), handlers); val map = (Map) Map.of( new UsernamePasswordCredential(), getAcceptUsersAuthenticationHandler(), getOtpCredential(), getTestOtpAuthenticationHandler()); val assertion = getAssertion(map); assertDoesNotThrow(new Executable() { @Override public void execute() { authz.authorize(new MockHttpServletRequest(), service, assertion); } }); } private ServiceTicketValidationAuthorizer getAuthorizer(final AuthenticationPolicy policy, final List<? extends AuthenticationHandler> authenticationHandlers) { val plan = new DefaultAuthenticationEventExecutionPlan(); plan.registerAuthenticationHandlers(authenticationHandlers); plan.registerAuthenticationPolicy(policy); return new AuthenticationPolicyAwareServiceTicketValidationAuthorizer(servicesManager, plan, applicationContext); } }
apache-2.0
krestenkrab/hotruby
modules/vm-shared/ruby/2.0/test/unit/testresult.rb
2086
# :nodoc: # # Author:: Nathaniel Talbott. # Copyright:: Copyright (c) 2000-2002 Nathaniel Talbott. All rights reserved. # License:: Ruby license. require 'test/unit/util/observable' module Test module Unit # Collects Test::Unit::Failure and Test::Unit::Error so that # they can be displayed to the user. To this end, observers # can be added to it, allowing the dynamic updating of, say, a # UI. class TestResult include Util::Observable CHANGED = "CHANGED" FAULT = "FAULT" attr_reader(:run_count, :assertion_count) # Constructs a new, empty TestResult. def initialize @run_count, @assertion_count = 0, 0 @failures, @errors = Array.new, Array.new end # Records a test run. def add_run @run_count += 1 notify_listeners(CHANGED, self) end # Records a Test::Unit::Failure. def add_failure(failure) @failures << failure notify_listeners(FAULT, failure) notify_listeners(CHANGED, self) end # Records a Test::Unit::Error. def add_error(error) @errors << error notify_listeners(FAULT, error) notify_listeners(CHANGED, self) end # Records an individual assertion. def add_assertion @assertion_count += 1 notify_listeners(CHANGED, self) end # Returns a string contain the recorded runs, assertions, # failures and errors in this TestResult. def to_s "#{run_count} tests, #{assertion_count} assertions, #{failure_count} failures, #{error_count} errors" end # Returns whether or not this TestResult represents # successful completion. def passed? return @failures.empty? && @errors.empty? end # Returns the number of failures this TestResult has # recorded. def failure_count return @failures.size end # Returns the number of errors this TestResult has # recorded. def error_count return @errors.size end end end end
apache-2.0
TNG/ArchUnit
archunit/src/test/java/com/tngtech/archunit/core/importer/testexamples/hierarchicalmethodcall/SuperclassWithCalledMethod.java
275
package com.tngtech.archunit.core.importer.testexamples.hierarchicalmethodcall; public class SuperclassWithCalledMethod { public static final String method = "method"; String method() { return null; } int maskedMethod() { return 0; } }
apache-2.0
greghogan/flink
flink-runtime/src/main/java/org/apache/flink/runtime/scheduler/DefaultSchedulerFactory.java
4493
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.flink.runtime.scheduler; import org.apache.flink.api.common.time.Time; import org.apache.flink.configuration.Configuration; import org.apache.flink.runtime.blob.BlobWriter; import org.apache.flink.runtime.checkpoint.CheckpointRecoveryFactory; import org.apache.flink.runtime.concurrent.ScheduledExecutorServiceAdapter; import org.apache.flink.runtime.executiongraph.failover.flip1.FailoverStrategyFactoryLoader; import org.apache.flink.runtime.executiongraph.failover.flip1.RestartBackoffTimeStrategy; import org.apache.flink.runtime.executiongraph.failover.flip1.RestartBackoffTimeStrategyFactoryLoader; import org.apache.flink.runtime.io.network.partition.JobMasterPartitionTracker; import org.apache.flink.runtime.jobgraph.JobGraph; import org.apache.flink.runtime.jobmaster.ExecutionDeploymentTracker; import org.apache.flink.runtime.jobmaster.slotpool.SlotPool; import org.apache.flink.runtime.metrics.groups.JobManagerJobMetricGroup; import org.apache.flink.runtime.rest.handler.legacy.backpressure.BackPressureStatsTracker; import org.apache.flink.runtime.shuffle.ShuffleMaster; import org.slf4j.Logger; import java.util.concurrent.Executor; import 
java.util.concurrent.ScheduledExecutorService; import static org.apache.flink.runtime.scheduler.DefaultSchedulerComponents.createSchedulerComponents; /** * Factory for {@link DefaultScheduler}. */ public class DefaultSchedulerFactory implements SchedulerNGFactory { @Override public SchedulerNG createInstance( final Logger log, final JobGraph jobGraph, final BackPressureStatsTracker backPressureStatsTracker, final Executor ioExecutor, final Configuration jobMasterConfiguration, final SlotPool slotPool, final ScheduledExecutorService futureExecutor, final ClassLoader userCodeLoader, final CheckpointRecoveryFactory checkpointRecoveryFactory, final Time rpcTimeout, final BlobWriter blobWriter, final JobManagerJobMetricGroup jobManagerJobMetricGroup, final Time slotRequestTimeout, final ShuffleMaster<?> shuffleMaster, final JobMasterPartitionTracker partitionTracker, final ExecutionDeploymentTracker executionDeploymentTracker, long initializationTimestamp) throws Exception { final DefaultSchedulerComponents schedulerComponents = createSchedulerComponents( jobGraph.getScheduleMode(), jobGraph.isApproximateLocalRecoveryEnabled(), jobMasterConfiguration, slotPool, slotRequestTimeout); final RestartBackoffTimeStrategy restartBackoffTimeStrategy = RestartBackoffTimeStrategyFactoryLoader .createRestartBackoffTimeStrategyFactory( jobGraph .getSerializedExecutionConfig() .deserializeValue(userCodeLoader) .getRestartStrategy(), jobMasterConfiguration, jobGraph.isCheckpointingEnabled()) .create(); log.info("Using restart back off time strategy {} for {} ({}).", restartBackoffTimeStrategy, jobGraph.getName(), jobGraph.getJobID()); return new DefaultScheduler( log, jobGraph, backPressureStatsTracker, ioExecutor, jobMasterConfiguration, schedulerComponents.getStartUpAction(), futureExecutor, new ScheduledExecutorServiceAdapter(futureExecutor), userCodeLoader, checkpointRecoveryFactory, rpcTimeout, blobWriter, jobManagerJobMetricGroup, shuffleMaster, partitionTracker, 
schedulerComponents.getSchedulingStrategyFactory(), FailoverStrategyFactoryLoader.loadFailoverStrategyFactory(jobMasterConfiguration), restartBackoffTimeStrategy, new DefaultExecutionVertexOperations(), new ExecutionVertexVersioner(), schedulerComponents.getAllocatorFactory(), executionDeploymentTracker, initializationTimestamp); } }
apache-2.0
conecenter/c4proto
base_lib/src/main/scala/ee/cone/c4gate/HttpUtilApi.scala
1098
package ee.cone.c4gate import okio.ByteString case class HttpResponse(status: Int, headers: Map[String, List[String]], body: ByteString) trait HttpUtil { def get(url: String, headers: List[(String, String)]): HttpResponse def post(url: String, headers: List[(String, String)]): Unit def post(url: String, headers: List[(String, String)], body: ByteString, timeOut: Option[Int], expectCode: Int): Unit def put(url: String, headers: List[(String, String)], body: ByteString, timeOut: Option[Int]): Int def put(url: String, headers: List[(String, String)], body: ByteString): Int } object HttpUtil { sealed trait HttpMethod { def ==: (that: String): Boolean = that == this.toString } object HttpMethod { case object PUT extends HttpMethod case object POST extends HttpMethod case object GET extends HttpMethod case object OPTIONS extends HttpMethod case object HEAD extends HttpMethod case object PATCH extends HttpMethod case object DELETE extends HttpMethod case object TRACE extends HttpMethod case object CONNECT extends HttpMethod } }
apache-2.0
dlukashev/AndroidSlidingUpPanel-foursquare-map-demo
app/src/main/java/com/amberfog/mapslidingtest/app/LockableRecyclerView.java
2219
/** * Copyright 2015-present Amberfog * <p/> * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * <p/> * http://www.apache.org/licenses/LICENSE-2.0 * <p/> * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.amberfog.mapslidingtest.app; import android.content.Context; import android.support.v7.widget.RecyclerView; import android.util.AttributeSet; import android.view.MotionEvent; import android.widget.ListView; public class LockableRecyclerView extends RecyclerView { private boolean mScrollable = true; public LockableRecyclerView(Context context) { super(context); } public LockableRecyclerView(Context context, AttributeSet attrs) { super(context, attrs); } public LockableRecyclerView(Context context, AttributeSet attrs, int defStyle) { super(context, attrs, defStyle); } public void setScrollingEnabled(boolean enabled) { mScrollable = enabled; } @Override public boolean onTouchEvent(MotionEvent ev) { switch (ev.getAction()) { case MotionEvent.ACTION_DOWN: // if we can scroll pass the event to the superclass if (mScrollable) { return super.onTouchEvent(ev); } // only continue to handle the touch event if scrolling enabled return mScrollable; // mScrollable is always false at this point default: return super.onTouchEvent(ev); } } @Override public boolean onInterceptTouchEvent(MotionEvent ev) { // Don't do anything with intercepted touch events if // we are not scrollable if (!mScrollable) { return false; } else { return super.onInterceptTouchEvent(ev); } } }
apache-2.0
tongjinlv/trtos
USER/Prj_TankCtr/TankCtr1_main.c
1917
/*******************************************ÉêÃ÷*************************************** ±¾Ç¶Èëʽ²Ù×÷ϵͳδ¾­ÊÚȨ£¬½ûÖ¹Ó¦ÓÃÓÚÈκÎÉÌÒµÓÃ; °æÈ¨ËùÓУ¬ÇÖȨ±Ø¾¿ http://www.trtos.com/ **************************************************************************************/ #include <Include.h> #include <Driver_Include.h> #include <Tools_Include.h> #include <GUI_Include.h> #include <GUI_Include.h> #include <FuncTask_Include.h> #include <Sensor_Driver.h> #include <..\USER\Prj_TankCtr\TankCom.h> #include <..\USER\Prj_TankCtr\IO_Driver.c> //#include <..\USER\Prj_TankCtr\TankCom.c> #include <..\USER\Prj_TankCtr\App_TankCtr1.c> #include <..\Driver\SZZigBee_Driver.c> uint8 LinkCount; void Task0(void *Tags) { LED2_OFF; LED3_OFF; DeBug_OutSet(DeBug_USART1);//µ÷ÊÔÐÅϢͨ¹ý´®¿Ú´òÓ¡ DeBug_SetType(Infor_All);//Ö»´òÓ¡InforÀàÐ͵ÄÏûÏ¢ Printf_SysInfor(); Remap_SWJ_JTAGDisable(); CTR_SetUint32(0); DevRun_Value.AllStatus=0; LED1_ON; Tos_TaskDelay(2000); while(1) { LED1_ON; Tos_TaskDelay(500); LED1_OFF; Tos_TaskDelay(500); } } void Task_CAN_SIGMY(void *Tags) { uint8 Data[10]; while(1) { LED3_ON; Uint32ToByte(&Data[0],SIG_GetUint32()); CAN_SIG_SendMsg(&Data[0],0x90); Tos_TaskDelay(100); LED3_OFF; Tos_TaskDelay(100); } } void CAN_Istr(CanRxMsg *RxMessage) { switch(RxMessage->StdId) { case 0x91:App_SetButtonDelay(RxMessage->Data[0]);break; default:break; } } uint8 MemaBuf[40000]; int main(void) { Mema_Init(&MemaBuf[0]); DeviceList_Init(); DeviceMount_Usart1(); DeviceMount_Control();//¹ÒÔÚÊä³öÉ豸 DeviceMount_Signel();//¹ÒÔÚÐźŲɼ¯É豸 DeviceMount_Can1(); Tos_Init(); Tos_TaskCreate(0,Task0,Null,"Tast0",3000); Tos_TaskCreate(2,App_TankButton,Null,"Tast2",3000); Tos_TaskCreate(3,Task_CAN_SIGMY,Null,"Tast2",3000); Tos_Start(); while(1); }
apache-2.0
slimkit/thinksns-plus
resources/spa/src/util/markdown.js
2304
import markdownIt from 'markdown-it' import plusImagePlugin from 'markdown-it-plus-image' import highlight from 'highlight.js' import container from 'markdown-it-container' import { baseURL } from '@/api' /** * Create a markdown it instance. * * @type {Object} */ export const markdown = markdownIt({ breaks: true, html: true, highlight: function (code) { return highlight ? highlight.highlightAuto(code).value : code }, }).use(plusImagePlugin, `${baseURL}/files/`) .use(container, 'hljs-left') /* align left */ .use(container, 'hljs-center')/* align center */ .use(container, 'hljs-right') /** * Markdown render. * * @param {string} markdownText * @return {String} * @author Seven Du <[email protected]> */ export function render (markdownText) { return markdown.render(String(markdownText)) } /** * Synyax Text AND images. * * @param {string} markdownText * @return {Object: { text: String, images: Array }} * @author Seven Du <[email protected]> */ export function syntaxTextAndImage (markdownText) { /** * Get markdown text rende to HTML string. * * @type {string} */ const html = render(markdownText) /** * Match all images HTML code in `html` * * @type {Array} */ const imageHtmlCodes = html.match(/<img.*?(?:>|\/>)/gi) /** * Images. * * @type {Array} */ let images = [] // For each all image. if (imageHtmlCodes instanceof Array) { imageHtmlCodes.forEach(function (imageHtmlCode) { /** * Match img HTML tag src attr. * * @type {Array} */ let result = imageHtmlCode.match(/src=['"]?([^'"]*)['"]?/i) // If matched push to images array. if (result !== null && result[1]) { images.push(result[1]) } }) } /** * Replace all HTML tag to '', And replace img HTML tag to "[图片]" * * @type {string} */ const text = html .replace(/<img.*?(?:>|\/>)/gi, '[图片]') // Replace img HTML tag to "[图片]" .replace(/<\/?.+?>/gi, '') // Removed all HTML tags. .replace(/ /g, '') // Removed all empty character. // Return all matched result. 
// { // images: Array, // text: string // } return { images, text } } /** * Export default, export render function. */ export default render
apache-2.0
luffyke/springboot-demo
src/main/java/org/smartx/demo/domain/UserRepository.java
214
package org.smartx.demo.domain; import org.springframework.stereotype.Repository; /** * <p> * * </p> * * <b>Creation Time:</b> 16/11/23 * * @author kext */ @Repository public interface UserRepository { }
apache-2.0
wecatch/app-turbo
demos/jinja2-support/apps/app/__init__.py
253
from turbo import register import app import api register.register_group_urls('', [ ('/', app.HomeHandler), ('/plus', app.IncHandler), ('/minus', app.MinusHandler), ]) register.register_group_urls('/v1', [ ('', api.HomeHandler), ])
apache-2.0
LQJJ/demo
126-go-common-master/app/job/main/credit-timer/dao/dao.go
530
package dao import ( "context" "go-common/app/job/main/credit-timer/conf" "go-common/library/database/sql" ) // Dao struct info of Dao. type Dao struct { db *sql.DB c *conf.Config } // New new a Dao and return. func New(c *conf.Config) (d *Dao) { d = &Dao{ c: c, db: sql.NewMySQL(c.Mysql), } return } // Close close connections of mc, redis, db. func (d *Dao) Close() { if d.db != nil { d.db.Close() } } // Ping ping health of db. func (d *Dao) Ping(c context.Context) (err error) { return d.db.Ping(c) }
apache-2.0
pdrados/cas
api/cas-server-core-api-configuration-model/src/main/java/org/apereo/cas/configuration/metadata/ConfigurationMetadataFieldVisitor.java
4907
package org.apereo.cas.configuration.metadata; import org.apereo.cas.configuration.model.core.authentication.PasswordPolicyProperties; import org.apereo.cas.configuration.model.core.authentication.PrincipalTransformationProperties; import org.apereo.cas.configuration.model.support.ldap.AbstractLdapProperties; import org.apereo.cas.configuration.model.support.ldap.LdapSearchEntryHandlersProperties; import org.apereo.cas.util.model.Capacity; import org.apereo.cas.util.model.TriStateBoolean; import com.github.javaparser.ast.Modifier; import com.github.javaparser.ast.body.FieldDeclaration; import com.github.javaparser.ast.type.ClassOrInterfaceType; import com.github.javaparser.ast.visitor.VoidVisitorAdapter; import lombok.RequiredArgsConstructor; import lombok.extern.slf4j.Slf4j; import lombok.val; import org.apereo.services.persondir.support.QueryType; import org.apereo.services.persondir.util.CaseCanonicalizationMode; import org.springframework.boot.configurationmetadata.ConfigurationMetadataProperty; import org.springframework.core.io.Resource; import java.util.List; import java.util.Map; import java.util.Set; import java.util.regex.Pattern; /** * This is {@link ConfigurationMetadataFieldVisitor}. 
* * @author Misagh Moayyed * @since 6.0.0 */ @Slf4j @RequiredArgsConstructor public class ConfigurationMetadataFieldVisitor extends VoidVisitorAdapter<ConfigurationMetadataProperty> { private static final Pattern EXCLUDED_TYPES; static { EXCLUDED_TYPES = Pattern.compile( String.class.getSimpleName() + '|' + Integer.class.getSimpleName() + '|' + Double.class.getSimpleName() + '|' + Long.class.getSimpleName() + '|' + Float.class.getSimpleName() + '|' + Boolean.class.getSimpleName() + '|' + PrincipalTransformationProperties.CaseConversion.class.getSimpleName() + '|' + QueryType.class.getSimpleName() + '|' + AbstractLdapProperties.LdapType.class.getSimpleName() + '|' + CaseCanonicalizationMode.class.getSimpleName() + '|' + TriStateBoolean.class.getSimpleName() + '|' + Capacity.class.getSimpleName() + '|' + PasswordPolicyProperties.PasswordPolicyHandlingOptions.class.getSimpleName() + '|' + LdapSearchEntryHandlersProperties.SearchEntryHandlerTypes.class.getSimpleName() + '|' + Map.class.getSimpleName() + '|' + Resource.class.getSimpleName() + '|' + List.class.getSimpleName() + '|' + Set.class.getSimpleName()); } private final Set<ConfigurationMetadataProperty> properties; private final Set<ConfigurationMetadataProperty> groups; private final boolean indexNameWithBrackets; private final String parentClass; private final String sourcePath; private static boolean shouldTypeBeExcluded(final ClassOrInterfaceType type) { return EXCLUDED_TYPES.matcher(type.getNameAsString()).matches(); } @Override public void visit(final FieldDeclaration field, final ConfigurationMetadataProperty property) { if (field.getVariables().isEmpty()) { throw new IllegalArgumentException("Field " + field + " has no variable definitions"); } val variable = field.getVariable(0); if (field.getModifiers().contains(Modifier.staticModifier())) { LOGGER.debug("Field [{}] is static and will be ignored for metadata generation", variable.getNameAsString()); return; } if (field.getJavadoc().isEmpty()) { 
LOGGER.error("Field [{}] has no Javadoc defined", field); } val creator = new ConfigurationMetadataPropertyCreator(indexNameWithBrackets, properties, groups, parentClass); val prop = creator.createConfigurationProperty(field, property.getName()); processNestedClassOrInterfaceTypeIfNeeded(field, prop); } private void processNestedClassOrInterfaceTypeIfNeeded(final FieldDeclaration n, final ConfigurationMetadataProperty prop) { if (n.getElementType() instanceof ClassOrInterfaceType) { val type = (ClassOrInterfaceType) n.getElementType(); if (!shouldTypeBeExcluded(type)) { val instance = ConfigurationMetadataClassSourceLocator.getInstance(); val clz = instance.locatePropertiesClassForType(type); if (clz != null && !clz.isMemberClass()) { val typePath = ConfigurationMetadataClassSourceLocator.buildTypeSourcePath(this.sourcePath, clz.getName()); val parser = new ConfigurationMetadataUnitParser(this.sourcePath); parser.parseCompilationUnit(properties, groups, prop, typePath, clz.getName(), false); } } } } }
apache-2.0
YUKAI/konashi-ios-sdk
samples/I2cSample/I2cSample/ViewController.h
334
// // ViewController.h // I2cSample // // Created on 12/26/12. // Copyright (c) 2012 Yukai Engineering. All rights reserved. // #import <UIKit/UIKit.h> @interface ViewController : UIViewController @property (weak, nonatomic) IBOutlet UILabel *statusMessage; - (IBAction)find:(id)sender; - (IBAction)clearLcd:(id)sender; @end
apache-2.0
googleapis/google-cloud-cpp
google/cloud/serviceusage/quickstart/README.md
6675
# HOWTO: using the Service Usage API C++ client in your project This directory contains small examples showing how to use the Service Usage API C++ client library in your own project. These instructions assume that you have some experience as a C++ developer and that you have a working C++ toolchain (compiler, linker, etc.) installed on your platform. * Packaging maintainers or developers who prefer to install the library in a fixed directory (such as `/usr/local` or `/opt`) should consult the [packaging guide](/doc/packaging.md). * Developers wanting to use the libraries as part of a larger CMake or Bazel project should consult the current document. Note that there are similar documents for each library in their corresponding directories. * Developers wanting to compile the library just to run some of the examples or tests should consult the [building and installing](/README.md#building-and-installing) section of the top-level README file. * Contributors and developers to `google-cloud-cpp` should consult the guide to [setup a development workstation][howto-setup-dev-workstation]. [howto-setup-dev-workstation]: /doc/contributor/howto-guide-setup-development-workstation.md ## Before you begin To run the quickstart examples you will need a working Google Cloud Platform (GCP) project. The [quickstart][quickstart-link] covers the necessary steps in detail. ## Configuring authentication for the C++ Client Library Like most Google Cloud Platform (GCP) services, Service Usage API requires that your application authenticates with the service before accessing any data. If you are not familiar with GCP authentication please take this opportunity to review the [Authentication Overview][authentication-quickstart]. This library uses the `GOOGLE_APPLICATION_CREDENTIALS` environment variable to find the credentials file. For example: | Shell | Command | | :----------------- | ---------------------------------------------- | | Bash/zsh/ksh/etc. 
| `export GOOGLE_APPLICATION_CREDENTIALS=[PATH]` | | sh | `GOOGLE_APPLICATION_CREDENTIALS=[PATH];`<br> `export GOOGLE_APPLICATION_CREDENTIALS` | | csh/tsch | `setenv GOOGLE_APPLICATION_CREDENTIALS [PATH]` | | Windows Powershell | `$env:GOOGLE_APPLICATION_CREDENTIALS=[PATH]` | | Windows cmd.exe | `set GOOGLE_APPLICATION_CREDENTIALS=[PATH]` | Setting this environment variable is the recommended way to configure the authentication preferences, though if the environment variable is not set, the library searches for a credentials file in the same location as the [Cloud SDK](https://cloud.google.com/sdk/). For more information about *Application Default Credentials*, see https://cloud.google.com/docs/authentication/production ## Using with Bazel > :warning: If you are using Windows or macOS there are additional instructions > at the end of this document. 1. Install Bazel using [the instructions][bazel-install] from the `bazel.build` website. 2. Compile this example using Bazel: ```bash cd $HOME/google-cloud-cpp/google/cloud/serviceusage/quickstart bazel build ... ``` Note that Bazel automatically downloads and compiles all dependencies of the project. As it is often the case with C++ libraries, compiling these dependencies may take several minutes. 3. Run the example, change the place holder(s) to appropriate values: ```bash bazel run :quickstart -- [...] ``` ## Using with CMake > :warning: If you are using Windows or macOS there are additional instructions > at the end of this document. 1. Install CMake. The package managers for most Linux distributions include a package for CMake. Likewise, you can install CMake on Windows using a package manager such as [chocolatey][choco-cmake-link], and on macOS using [homebrew][homebrew-cmake-link]. You can also obtain the software directly from the [cmake.org](https://cmake.org/download/). 2. Install the dependencies with your favorite tools. 
As an example, if you use [vcpkg](https://github.com/Microsoft/vcpkg.git):

   ```bash
   cd $HOME/vcpkg
   ./vcpkg install google-cloud-cpp[core,serviceusage]
   ```

   Note that, as it is often the case with C++ libraries, compiling these
   dependencies may take several minutes.

3. Configure CMake, if necessary, configure the directory where you installed
   the dependencies:

   ```bash
   cd $HOME/google-cloud-cpp/google/cloud/serviceusage/quickstart
   cmake -H. -B.build -DCMAKE_TOOLCHAIN_FILE=$HOME/vcpkg/scripts/buildsystems/vcpkg.cmake
   cmake --build .build
   ```

4. Run the example, change the place holder to appropriate values:

   ```bash
   .build/quickstart [...]
   ```

## Platform Specific Notes

### macOS

gRPC [requires][grpc-roots-pem-bug] an environment variable to configure the
trust store for SSL certificates, you can download and configure this using:

```bash
curl -Lo roots.pem https://pki.google.com/roots.pem
export GRPC_DEFAULT_SSL_ROOTS_FILE_PATH="$PWD/roots.pem"
```

To workaround a [bug in Bazel][bazel-grpc-macos-bug], gRPC requires this flag
on macOS builds, you can add the option manually or include it in your
`.bazelrc` file:

```bash
bazel build --copt=-DGRPC_BAZEL_BUILD ...
```

### Windows

To correctly configure the MSVC runtime you should change the CMake minimum
required version to 3.15 or add `-DCMAKE_POLICY_DEFAULT_CMP0091=NEW` to the
CMake configuration step.

Bazel tends to create very long file names and paths. You may need to use a
short directory to store the build output, such as `c:\b`, and instruct Bazel
to use it via:

```shell
bazel --output_user_root=c:\b build ...
``` gRPC [requires][grpc-roots-pem-bug] an environment variable to configure the trust store for SSL certificates, you can download and configure this using: ```console @powershell -NoProfile -ExecutionPolicy unrestricted -Command ^ (new-object System.Net.WebClient).Downloadfile( ^ 'https://pki.google.com/roots.pem', 'roots.pem') set GRPC_DEFAULT_SSL_ROOTS_FILE_PATH=%cd%\roots.pem ``` [bazel-install]: https://docs.bazel.build/versions/main/install.html [quickstart-link]: https://cloud.google.com/service-usage/docs/getting-started [grpc-roots-pem-bug]: https://github.com/grpc/grpc/issues/16571 [choco-cmake-link]: https://chocolatey.org/packages/cmake [homebrew-cmake-link]: https://formulae.brew.sh/formula/cmake [cmake-download-link]: https://cmake.org/download/ [bazel-grpc-macos-bug]: https://github.com/bazelbuild/bazel/issues/4341 [authentication-quickstart]: https://cloud.google.com/docs/authentication/getting-started 'Authentication Getting Started'
apache-2.0
open-telemetry/opentelemetry-js
packages/opentelemetry-resources/src/detectors/EnvDetector.ts
5327
/* * Copyright The OpenTelemetry Authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import { diag } from '@opentelemetry/api'; import { getEnv } from '@opentelemetry/core'; import { SemanticResourceAttributes } from '@opentelemetry/semantic-conventions'; import { Resource } from '../Resource'; import { Detector, ResourceAttributes } from '../types'; import { ResourceDetectionConfig } from '../config'; /** * EnvDetector can be used to detect the presence of and create a Resource * from the OTEL_RESOURCE_ATTRIBUTES environment variable. */ class EnvDetector implements Detector { // Type, attribute keys, and attribute values should not exceed 256 characters. private readonly _MAX_LENGTH = 255; // OTEL_RESOURCE_ATTRIBUTES is a comma-separated list of attributes. private readonly _COMMA_SEPARATOR = ','; // OTEL_RESOURCE_ATTRIBUTES contains key value pair separated by '='. private readonly _LABEL_KEY_VALUE_SPLITTER = '='; private readonly _ERROR_MESSAGE_INVALID_CHARS = 'should be a ASCII string with a length greater than 0 and not exceed ' + this._MAX_LENGTH + ' characters.'; private readonly _ERROR_MESSAGE_INVALID_VALUE = 'should be a ASCII string with a length not exceed ' + this._MAX_LENGTH + ' characters.'; /** * Returns a {@link Resource} populated with attributes from the * OTEL_RESOURCE_ATTRIBUTES environment variable. Note this is an async * function to conform to the Detector interface. 
* * @param config The resource detection config */ async detect(_config?: ResourceDetectionConfig): Promise<Resource> { const attributes: ResourceAttributes = {}; const env = getEnv(); const rawAttributes = env.OTEL_RESOURCE_ATTRIBUTES; const serviceName = env.OTEL_SERVICE_NAME; if (rawAttributes) { try { const parsedAttributes = this._parseResourceAttributes(rawAttributes); Object.assign(attributes, parsedAttributes); } catch (e) { diag.debug(`EnvDetector failed: ${e.message}`); } } if (serviceName) { attributes[SemanticResourceAttributes.SERVICE_NAME] = serviceName; } return new Resource(attributes); } /** * Creates an attribute map from the OTEL_RESOURCE_ATTRIBUTES environment * variable. * * OTEL_RESOURCE_ATTRIBUTES: A comma-separated list of attributes describing * the source in more detail, e.g. “key1=val1,key2=val2”. Domain names and * paths are accepted as attribute keys. Values may be quoted or unquoted in * general. If a value contains whitespaces, =, or " characters, it must * always be quoted. * * @param rawEnvAttributes The resource attributes as a comma-seperated list * of key/value pairs. * @returns The sanitized resource attributes. */ private _parseResourceAttributes( rawEnvAttributes?: string ): ResourceAttributes { if (!rawEnvAttributes) return {}; const attributes: ResourceAttributes = {}; const rawAttributes: string[] = rawEnvAttributes.split( this._COMMA_SEPARATOR, -1 ); for (const rawAttribute of rawAttributes) { const keyValuePair: string[] = rawAttribute.split( this._LABEL_KEY_VALUE_SPLITTER, -1 ); if (keyValuePair.length !== 2) { continue; } let [key, value] = keyValuePair; // Leading and trailing whitespaces are trimmed. 
key = key.trim(); value = value.trim().split('^"|"$').join(''); if (!this._isValidAndNotEmpty(key)) { throw new Error(`Attribute key ${this._ERROR_MESSAGE_INVALID_CHARS}`); } if (!this._isValid(value)) { throw new Error(`Attribute value ${this._ERROR_MESSAGE_INVALID_VALUE}`); } attributes[key] = value; } return attributes; } /** * Determines whether the given String is a valid printable ASCII string with * a length not exceed _MAX_LENGTH characters. * * @param str The String to be validated. * @returns Whether the String is valid. */ private _isValid(name: string): boolean { return name.length <= this._MAX_LENGTH && this._isPrintableString(name); } private _isPrintableString(str: string): boolean { for (let i = 0; i < str.length; i++) { const ch: string = str.charAt(i); if (ch <= ' ' || ch >= '~') { return false; } } return true; } /** * Determines whether the given String is a valid printable ASCII string with * a length greater than 0 and not exceed _MAX_LENGTH characters. * * @param str The String to be validated. * @returns Whether the String is valid and not empty. */ private _isValidAndNotEmpty(str: string): boolean { return str.length > 0 && this._isValid(str); } } export const envDetector = new EnvDetector();
apache-2.0
jexp/idea2
platform/lang-impl/src/com/intellij/openapi/roots/ui/configuration/DefaultModulesProvider.java
1618
/* * Copyright 2000-2009 JetBrains s.r.o. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.intellij.openapi.roots.ui.configuration; import com.intellij.openapi.module.Module; import com.intellij.openapi.module.ModuleManager; import com.intellij.openapi.project.Project; import com.intellij.openapi.roots.ModuleRootManager; import com.intellij.openapi.roots.ModuleRootModel; import com.intellij.facet.FacetModel; import com.intellij.facet.FacetManager; /** * @author nik */ public class DefaultModulesProvider implements ModulesProvider { private final Project myProject; public DefaultModulesProvider(final Project project) { myProject = project; } public Module[] getModules() { return ModuleManager.getInstance(myProject).getModules(); } public Module getModule(String name) { return ModuleManager.getInstance(myProject).findModuleByName(name); } public ModuleRootModel getRootModel(Module module) { return ModuleRootManager.getInstance(module); } public FacetModel getFacetModel(Module module) { return FacetManager.getInstance(module); } }
apache-2.0
PyThaiNLP/pythainlp
pythainlp/cli/tag.py
2123
""" thainlp tag command line. """ import argparse from pythainlp import cli from pythainlp.tag import locations, named_entity, pos_tag class SubAppBase: def __init__(self, name, argv): parser = argparse.ArgumentParser(**cli.make_usage("tag " + name)) parser.add_argument( "text", type=str, help="input text", ) parser.add_argument( "-s", "--sep", dest="separator", type=str, help=f"Token separator for input text. default: {self.separator}", default=self.separator, ) args = parser.parse_args(argv) self.args = args tokens = args.text.split(args.separator) result = self.run(tokens) for word, tag in result: print(word, "/", tag) class POSTaggingApp(SubAppBase): def __init__(self, *args, **kwargs): self.separator = "|" self.run = pos_tag super().__init__(*args, **kwargs) class App: def __init__(self, argv): parser = argparse.ArgumentParser( prog="tag", description="Annotate a text with linguistic information", usage=( 'thainlp tag <tag_type> [--sep "<separator>"] "<text>"\n\n' "tag_type:\n\n" "pos part-of-speech\n\n" "<separator> and <text> should be inside double quotes.\n" "<text> should be a tokenized text, " "with tokens separated by <separator>.\n\n" "Example:\n\n" 'thainlp tag pos -s " " "แรงดึงดูด เก็บ หัว คุณ ลง"\n\n' "--" ), ) parser.add_argument("tag_type", type=str, help="[pos]") args = parser.parse_args(argv[2:3]) cli.exit_if_empty(args.tag_type, parser) tag_type = str.lower(args.tag_type) argv = argv[3:] if tag_type == "pos": POSTaggingApp("Part-of-Speech tagging", argv) else: print(f"Tag type not available: {tag_type}")
apache-2.0
guardian/frontend-email-reporting
app/views/main.scala.html
3551
@* Main layout template: renders the fixed navbar, date picker and the
   page-specific `content` body under the given `title`. *@
@(title: String)(content: Html)

<!DOCTYPE html>
<html lang="en">
  <head>
    <meta charset="utf-8">
    <meta http-equiv="X-UA-Compatible" content="IE=edge">
    <meta name="viewport" content="width=device-width, initial-scale=1">
    <!-- The above 3 meta tags *must* come first in the head; any other head content must come *after* these tags -->
    <title>@title</title>

    <!-- Bootstrap -->
    <!-- Latest compiled and minified CSS -->
    <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.6/css/bootstrap.min.css" integrity="sha384-1q8mTJOASx8j1Au+a5WDVnPi2lkFfwwEAa8hDDdjZlpLegxhjVME1fgjWPGmkzs7" crossorigin="anonymous">
    <!-- Optional theme -->
    <link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.6/css/bootstrap-theme.min.css" integrity="sha384-fLW2N01lMqjakBkx3l/M9EahuwpSfeNvV63J5ezn3uZzapT0u7EYsXMjQV+0En5r" crossorigin="anonymous">

    <!-- App stylesheets served through the Play assets reverse router. -->
    <link rel="stylesheet" media="screen" href="@routes.Assets.versioned("stylesheets/daterangepicker.css")">
    <link rel="stylesheet" media="screen" href="@routes.Assets.versioned("stylesheets/dashboard.css")">
    @*<link rel="stylesheet" media="screen" href="@routes.Assets.versioned("stylesheets/dashboard.css")">*@
    @*<link rel="shortcut icon" type="image/png" href="@routes.Assets.versioned("images/favicon.png")">*@

    <!-- HTML5 shim and Respond.js for IE8 support of HTML5 elements and media queries -->
    <!-- WARNING: Respond.js doesn't work if you view the page via file:// -->
    <!--[if lt IE 9]>
      <script src="https://oss.maxcdn.com/html5shiv/3.7.2/html5shiv.min.js"></script>
      <script src="https://oss.maxcdn.com/respond/1.4.2/respond.min.js"></script>
    <![endif]-->

    <!-- jQuery (necessary for Bootstrap's JavaScript plugins) -->
    <script src="https://ajax.googleapis.com/ajax/libs/jquery/1.11.3/jquery.min.js"></script>
    <script src="@routes.Assets.versioned("javascripts/main.js")"></script>
  </head>
  <body>
    <!-- Fixed top navigation shared by all pages. -->
    <nav class="navbar navbar-inverse navbar-fixed-top">
      <div class="container-fluid">
        <div class="navbar-header">
          <button type="button" class="navbar-toggle collapsed" data-toggle="collapse" data-target="#navbar" aria-expanded="false" aria-controls="navbar">
            <span class="sr-only">Toggle navigation</span>
            <span class="icon-bar"></span>
            <span class="icon-bar"></span>
            <span class="icon-bar"></span>
          </button>
          <a class="navbar-brand" href="#">Email Reports</a>
        </div>
        <div id="navbar" class="navbar-collapse collapse">
          <ul class="nav navbar-nav navbar-right">
            <li>@fragments.datePicker()</li>
            <li><a href="/">Dashboard</a></li>
            <li><a href="/rawstats">Raw Stats</a></li>
          </ul>
        </div>
      </div>
    </nav>

    <!-- Page body supplied by the calling template. -->
    <div class="container-fluid">
      <div class="row">
        <div class="col-sm-11 col-sm-offset-1 col-md-12 col-md-offset-0 main">
          <h1 class="page-header">Daily Email reports</h1>
          <div class="row placeholders">
            @content
          </div>
        </div>
      </div>
    </div>
  </body>
</html>
apache-2.0
oVirt/ovirt-engine-extension-aaa-ldap
src/main/java/org/ovirt/engine/extension/aaa/ldap/Util.java
13912
/*
 * Copyright 2012-2015 Red Hat Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */
package org.ovirt.engine.extension.aaa.ldap;

import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.Reader;
import java.lang.reflect.Array;
import java.lang.reflect.Constructor;
import java.lang.reflect.Field;
import java.lang.reflect.Method;
import java.lang.reflect.Modifier;
import java.nio.charset.StandardCharsets;
import java.security.GeneralSecurityException;
import java.security.KeyStore;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

/**
 * Assorted static helpers: ${var}/${namespace:var} placeholder expansion,
 * layered properties loading with include directives, reflective string-to-object
 * conversion and bean population, and key store loading.
 */
public class Util {

    // Matches ${var} or ${namespace:var}; "namespace" and "var" are named groups.
    private static final Pattern VAR_PATTERN = Pattern.compile("\\$\\{((?<namespace>[^:}]*):)?(?<var>[^}]*)\\}");

    // Primitive -> wrapper lookup used by getObjectValueByString.
    private static final Map<Class<?>, Class<?>> typeBox = new HashMap<>();

    static {
        typeBox.put(boolean.class, Boolean.class);
        typeBox.put(byte.class, Byte.class);
        typeBox.put(char.class, Character.class);
        typeBox.put(double.class, Double.class);
        typeBox.put(float.class, Float.class);
        typeBox.put(int.class, Integer.class);
        typeBox.put(long.class, Long.class);
        typeBox.put(short.class, Short.class);
        typeBox.put(void.class, Void.class);
    }

    /** Null-safe toString: returns {@code def} when {@code o} is null. */
    public static String toString(Object o, String def) {
        return o != null ? o.toString() : def;
    }

    /** Null-safe toString defaulting to the empty string. */
    public static String toString(Object o) {
        return toString(o, "");
    }

    /** Removes from {@code map} every entry whose key starts with {@code prefix}. */
    public static void removeKeysWithPrefix(Map<String, Object> map, String prefix) {
        Iterator<Map.Entry<String, Object>> iter = map.entrySet().iterator();
        while(iter.hasNext()) {
            Map.Entry<String, Object> e = iter.next();
            if (e.getKey().startsWith(prefix)) {
                iter.remove();
            }
        }
    }

    /**
     * Expands ${var} (when {@code namespace} is null) or ${namespace:var}
     * placeholders in {@code s} using {@code vars}. Placeholders from other
     * namespaces, and variables missing from {@code vars}, are left as-is
     * (a missing variable expands to nothing only when the namespace matched
     * and the lookup returned null).
     */
    public static String expandString(String s, String namespace, Map<? extends Object, ? extends Object> vars) {
        StringBuilder ret = new StringBuilder();

        Matcher m = VAR_PATTERN.matcher(s);
        int last = 0;
        while (m.find()) {
            ret.append(s.substring(last, m.start()));
            if (
                (namespace == null && m.group("namespace") == null) ||
                (namespace != null && namespace.equals(m.group("namespace")))
            ) {
                Object o = vars.get(m.group("var"));
                if (o != null) {
                    ret.append(o);
                }
            } else {
                // Different namespace: keep the placeholder verbatim.
                ret.append(m.group(0));
            }
            last = m.end();
        }
        // regionEnd() is s.length() here (default matcher region): append the tail.
        ret.append(s.substring(last, m.regionEnd()));

        return ret.toString();
    }

    /** Single in-place expansion pass over a MapProperties tree (recursive over children). */
    public static void _expandMap(MapProperties props, String namespace, Map<? extends Object, ? extends Object> vars) {
        if (props.getValue() != null) {
            props.setValue(expandString(props.getValue(), namespace, vars));
        }
        for (MapProperties entry : props.getMap().values()) {
            _expandMap(entry, namespace, vars);
        }
    }

    /**
     * Returns a copy of {@code props} with placeholders expanded to a fix point:
     * passes repeat until a pass changes nothing, so nested placeholders resolve.
     * Note: a self-referential placeholder would never converge — assumed absent.
     */
    public static MapProperties expandMap(MapProperties props, String namespace, Map<? extends Object, ? extends Object> vars) {
        MapProperties ret = new MapProperties(props);
        MapProperties old;
        do {
            old = new MapProperties(ret);
            _expandMap(ret, namespace, vars);
        } while(!old.equals(ret));
        return ret;
    }

    /**
     * Returns a copy of {@code props} with each value expanded via
     * {@link #expandString}. When {@code recursive} is true, repeats until a
     * fix point is reached (nested placeholders fully resolved).
     */
    public static Properties expandProperties(
        Properties props,
        String namespace,
        Map<? extends Object, ? extends Object> vars,
        boolean recursive
    ) {
        Properties ret = new Properties();
        ret.putAll(props);
        Properties old;
        do {
            old = new Properties();
            old.putAll(ret);
            for (Map.Entry<Object, Object> entry : ret.entrySet()) {
                entry.setValue(expandString(entry.getValue().toString(), namespace, vars));
            }
        } while(recursive && !old.equals(ret));
        return ret;
    }

    /**
     * Returns the sorted list of property keys equal to {@code prefix} or
     * starting with {@code prefix + "."}. A trailing '.' on the prefix is ignored.
     */
    public static List<String> stringPropertyNames(Properties props, String prefix) {
        if (prefix.endsWith(".")) {
            prefix = prefix.substring(0, prefix.length()-1);
        }
        List<String> keys = new LinkedList<>();
        for (String key : props.stringPropertyNames()) {
            if (key.equals(prefix) || key.startsWith(prefix + ".")) {
                keys.add(key);
            }
        }
        Collections.sort(keys);
        return keys;
    }

    /**
     * Loads {@code file} into {@code out}, first recursively loading every file
     * named by an {@code includeKey}-prefixed property. Include values wrapped in
     * angle brackets ({@code <name>}) are resolved against {@code includeDirectories};
     * plain relative paths are resolved against the including file's directory.
     * Includes are applied first, so the including file's own entries win.
     *
     * @throws IOException on read failure, or FileNotFoundException when a
     *         bracketed include is not found on the search path
     */
    public static void includeProperties(
        Properties out,
        String includeKey,
        List<File> includeDirectories,
        File file
    ) throws IOException {
        Properties props = new Properties();
        try (
            InputStream is = new FileInputStream(file);
            Reader reader = new InputStreamReader(is, StandardCharsets.UTF_8);
        ) {
            props.load(reader);
        }
        // _basedir is made available to ${local:...} expansion below.
        props.put("_basedir", file.getParent());
        props = expandProperties(props, "local", props, true);

        for (String key : stringPropertyNames(props, includeKey)) {
            String include = props.getProperty(key);
            File includeFile = null;
            if (include.startsWith("<") && include.endsWith(">")) {
                // <name>: search the configured include directories in order.
                include = include.substring(1, include.length()-1);
                for (File i : includeDirectories) {
                    File t = new File(i, include);
                    if (t.exists()) {
                        includeFile = t;
                        break;
                    }
                }
                if (includeFile == null) {
                    throw new FileNotFoundException(
                        String.format(
                            "Cannot include file '%s' from search path %s",
                            include,
                            includeDirectories
                        )
                    );
                }
            } else {
                // Plain path: relative paths resolve against the including file.
                includeFile = new File(include);
                if (!includeFile.isAbsolute()) {
                    includeFile = new File(file.getParentFile(), include);
                }
            }
            includeProperties(out, includeKey, includeDirectories, includeFile);
        }

        // Apply this file's entries after its includes so they take precedence.
        for (Map.Entry<Object, Object> entry : props.entrySet()) {
            out.put(entry.getKey(), entry.getValue());
        }
    }

    /**
     * Loads the given files (with "include" processing), then expands
     * ${global:...} placeholders against the merged properties and
     * ${sys:...} placeholders against system properties.
     */
    public static Properties loadProperties(List<File> includeDirectories, File... file) throws IOException {
        Properties props = new Properties();
        for (File f : file) {
            includeProperties(props, "include", includeDirectories, f);
        }
        props = expandProperties(props, "global", props, true);
        props = expandProperties(props, "sys", System.getProperties(), false);
        return props;
    }

    /**
     * Converts up to {@code size} elements of {@code l} to ints (via toString);
     * remaining slots are filled with {@code def}.
     */
    public static int[] asIntArray(List<?> l, int def, int size) {
        int[] ret = new int[size];
        Arrays.fill(ret, def);
        for (int i = 0; i < l.size() && i < size; i++) {
            ret[i] = Integer.valueOf(l.get(i).toString());
        }
        return ret;
    }

    /** Collects the non-null values of {@code key} from every child record of {@code props}. */
    public static List<String> getValueFromMapRecord(MapProperties props, String key) {
        List<String> ret = new ArrayList<>();
        for (MapProperties entry : props.getMap().values()) {
            String v = entry.getString(null, key);
            if (v != null) {
                ret.add(v);
            }
        }
        return ret;
    }

    /**
     * Best-effort conversion of {@code value} to an instance of {@code clazz}.
     * Tries, in order: comma-split Collection, comma-split object array,
     * a public static field named {@code value}, static {@code valueOf(String)},
     * static {@code valueOf(Object)}, then a String constructor. Returns null
     * when nothing applies. The empty catch blocks are deliberate: each failed
     * strategy silently falls through to the next.
     */
    public static Object getObjectValueByString(Class<?> clazz, String value) {
        Object v = null;

        if (clazz.isPrimitive()) {
            clazz = typeBox.get(clazz);
        }

        if (v == null) {
            if (clazz.equals(Collection.class)) {
                List<Object> r = new ArrayList<>();
                for (String c : value.trim().split(" *, *")) {
                    if (!c.isEmpty()) {
                        r.add(getObjectValueByString(String.class, c));
                    }
                }
                v = r;
            }
        }
        if (v == null) {
            if (clazz.isArray() && Object.class.isAssignableFrom(clazz.getComponentType())) {
                List<Object> r = new ArrayList<>();
                for (String c : value.trim().split(" *, *")) {
                    if (!c.isEmpty()) {
                        r.add(getObjectValueByString(clazz.getComponentType(), c));
                    }
                }
                v = (Object)r.toArray((Object[]) Array.newInstance(clazz.getComponentType(), 0));
            }
        }
        if (v == null) {
            try {
                Field f = clazz.getField(value);
                if (Modifier.isStatic(f.getModifiers())) {
                    v = f.get(null);
                }
            } catch(ReflectiveOperationException e) {}
        }
        if (v == null) {
            try {
                Method convert = clazz.getMethod("valueOf", String.class);
                if (Modifier.isStatic(convert.getModifiers())) {
                    v = convert.invoke(null, value);
                }
            } catch(ReflectiveOperationException e) {}
        }
        if (v == null) {
            try {
                Method convert = clazz.getMethod("valueOf", Object.class);
                if (Modifier.isStatic(convert.getModifiers())) {
                    v = convert.invoke(null, value);
                }
            } catch(ReflectiveOperationException e) {}
        }
        if (v == null) {
            try {
                Constructor<?> constructor = clazz.getDeclaredConstructor(String.class);
                v = constructor.newInstance(value);
            } catch(ReflectiveOperationException e) {}
        }

        return v;
    }

    /**
     * Populates {@code o} from {@code props} by reflection: for every public
     * one-argument method whose name starts with one of {@code methodPrefixes}
     * (e.g. "set"), looks up the de-capitalized remainder as a property key and
     * invokes the method with the converted value(s). Both a direct value and
     * indexed child values are applied when present.
     *
     * @throws RuntimeException wrapping any conversion/invocation failure
     */
    public static void setObjectByProperties(Object o, MapProperties props, String... methodPrefixes) {
        if (props == null) {
            return;
        }
        for (Method m : o.getClass().getMethods()) {
            for (String p : methodPrefixes) {
                String methodName = m.getName();
                if (methodName.startsWith(p)) {
                    // setFoo -> foo
                    String name = (
                        methodName.substring(p.length(), p.length()+1).toLowerCase() +
                        methodName.substring(p.length()+1)
                    );
                    try {
                        List<String> values = new ArrayList<>();
                        MapProperties valueProps = props.getOrEmpty(name);
                        values.add(valueProps.getValue());
                        for (MapProperties valueProps1 : valueProps.getMap().values()) {
                            values.add(valueProps1.getValue());
                        }
                        for (String value : values) {
                            if (value != null) {
                                Class<?>[] args = m.getParameterTypes();
                                if (args.length == 1) {
                                    Object v = getObjectValueByString(args[0], value);
                                    if (v != null) {
                                        m.invoke(o, v);
                                    }
                                }
                            }
                        }
                    } catch(Exception e) {
                        throw new RuntimeException(
                            String.format(
                                "Cannot set key '%s', error: %s",
                                name,
                                e.getMessage()
                            ),
                            e
                        );
                    }
                }
            }
        }
    }

    /**
     * Parses a comma-separated list of enum constant names into enum values.
     * Returns an empty list for null input; empty components are skipped.
     *
     * @throws IllegalArgumentException if a component is not a constant of {@code clazz}
     */
    public static <T extends Enum<T>> List<T> getEnumFromString(Class<T> clazz, String value) {
        List<T> ret = new ArrayList<>();
        if (value != null) {
            String[] comps = value.trim().split(" *, *");
            for (String c : comps) {
                if (!c.isEmpty()) {
                    ret.add(T.valueOf(clazz, c));
                }
            }
        }
        return ret;
    }

    /**
     * Loads a KeyStore from {@code file}, or returns null when {@code file} is null.
     * Type defaults to {@link KeyStore#getDefaultType()}; the provider is optional.
     * NOTE(review): a null {@code password} would NPE on toCharArray — callers
     * apparently always pass one; confirm before relying on it.
     */
    public static KeyStore loadKeyStore(String provider, String type, String file, String password) throws GeneralSecurityException, IOException {
        KeyStore store = null;
        if (file != null) {
            try (InputStream in = new FileInputStream(file)) {
                if (type == null) {
                    type = KeyStore.getDefaultType();
                }
                if (provider == null) {
                    store = KeyStore.getInstance(
                        type
                    );
                } else {
                    store = KeyStore.getInstance(
                        type,
                        provider
                    );
                }
                store.load(in, password.toCharArray());
            }
        }
        return store;
    }

}

// vim: expandtab tabstop=4 shiftwidth=4
apache-2.0
werval/werval
io.werval.modules/io.werval.modules.xml/src/main/java/io/werval/modules/xml/internal/SAXParserFactoryImpl.java
10474
/*
 * Copyright (c) 2014 the original author or authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package io.werval.modules.xml.internal;

import java.io.File;
import java.io.IOException;
import java.io.InputStream;
import javax.xml.XMLConstants;
import javax.xml.parsers.ParserConfigurationException;
import javax.xml.parsers.SAXParser;
import javax.xml.parsers.SAXParserFactory;
import javax.xml.validation.Schema;
import io.werval.modules.xml.SAX;
import io.werval.modules.xml.UncheckedXMLException;
import org.xml.sax.HandlerBase;
import org.xml.sax.InputSource;
import org.xml.sax.Parser;
import org.xml.sax.SAXException;
import org.xml.sax.SAXNotRecognizedException;
import org.xml.sax.SAXNotSupportedException;
import org.xml.sax.XMLReader;
import org.xml.sax.helpers.DefaultHandler;
import static io.werval.modules.xml.internal.Internal.ACCESS_EXTERNAL_ALL;
import static io.werval.modules.xml.internal.Internal.ACCESS_EXTERNAL_NONE;
import static io.werval.modules.xml.internal.Internal.LOG;

/**
 * SAXParserFactory implementation for XMLPlugin.
 * <p>
 * Factory API that enables applications to configure and obtain a SAX based parser to parse XML documents.
 * <p>
 * Wraps a concrete (Xerces) factory and hardens it against XXE: external
 * entities and DOCTYPEs are disabled unless explicitly enabled via
 * {@code Internal.EXTERNAL_ENTITIES}.
 *
 * @see SAXParserFactory
 */
public final class SAXParserFactoryImpl
    extends SAXParserFactory
{
    // Aalto
    // private final SAXParserFactory delegate = new com.fasterxml.aalto.sax.SAXParserFactoryImpl();
    // Woodstox
    // private final SAXParserFactory delegate = new com.ctc.wstx.sax.WstxSAXParserFactory();
    // Xerces
    private final SAXParserFactory delegate = new org.apache.xerces.jaxp.SAXParserFactoryImpl();

    public SAXParserFactoryImpl()
        throws ParserConfigurationException, SAXException
    {
        // Aalto & Woodstox & Xerces
        delegate.setFeature( SAX.Features.EXTERNAL_GENERAL_ENTITIES, Internal.EXTERNAL_ENTITIES.get() );
        delegate.setFeature( SAX.Features.EXTERNAL_PARAMETER_ENTITIES, Internal.EXTERNAL_ENTITIES.get() );
        // Xerces
        delegate.setFeature( XMLConstants.FEATURE_SECURE_PROCESSING, true );
        delegate.setFeature( "http://apache.org/xml/features/standard-uri-conformant", true );
        setValidating( false );
        // No support but should be disabled anyway, belt'n braces
        delegate.setXIncludeAware( false );
    }

    @Override
    public void setNamespaceAware( boolean namespaceAware )
    {
        delegate.setNamespaceAware( namespaceAware );
    }

    /**
     * Toggles DTD validation and the related Xerces features together:
     * enabling validation also re-allows DOCTYPE declarations (and external
     * DTD loading only when external entities are globally enabled).
     */
    @Override
    public void setValidating( boolean dtdValidation )
    {
        try
        {
            // Xerces
            delegate.setFeature( "http://apache.org/xml/features/validation/balance-syntax-trees", dtdValidation );
            delegate.setFeature( "http://apache.org/xml/features/nonvalidating/load-dtd-grammar", dtdValidation );
            delegate.setFeature( "http://apache.org/xml/features/nonvalidating/load-external-dtd", dtdValidation && Internal.EXTERNAL_ENTITIES.get() );
            delegate.setFeature( "http://apache.org/xml/features/disallow-doctype-decl", !dtdValidation );
        }
        catch( ParserConfigurationException | SAXNotRecognizedException | SAXNotSupportedException ex )
        {
            throw new UncheckedXMLException( ex );
        }
        delegate.setValidating( dtdValidation );
        if( dtdValidation )
        {
            LOG.warn( "SAXParserFactory.setValidating( true ) Unsafe DTD support enabled" );
        }
    }

    @Override
    public boolean isNamespaceAware()
    {
        return delegate.isNamespaceAware();
    }

    @Override
    public boolean isValidating()
    {
        return delegate.isValidating();
    }

    @Override
    public Schema getSchema()
    {
        return delegate.getSchema();
    }

    @Override
    public void setSchema( Schema schema )
    {
        delegate.setSchema( schema );
    }

    @Override
    public void setXIncludeAware( boolean xIncludeAware )
    {
        delegate.setXIncludeAware( xIncludeAware );
    }

    @Override
    public boolean isXIncludeAware()
    {
        return delegate.isXIncludeAware();
    }

    /** Wraps the delegate's parser so JAXP 1.5 external-access limits are applied. */
    @Override
    public SAXParser newSAXParser()
        throws ParserConfigurationException, SAXException
    {
        return new SAXParserImpl( delegate.newSAXParser() );
    }

    @Override
    public void setFeature( String name, boolean value )
        throws ParserConfigurationException, SAXNotRecognizedException, SAXNotSupportedException
    {
        delegate.setFeature( name, value );
    }

    @Override
    public boolean getFeature( String name )
        throws ParserConfigurationException, SAXNotRecognizedException, SAXNotSupportedException
    {
        return delegate.getFeature( name );
    }

    /**
     * Delegating SAXParser that applies the JAXP 1.5 ACCESS_EXTERNAL_DTD/SCHEMA
     * properties at construction and hardens any XMLReader it hands out.
     * Failures to set the properties are only traced: pre-1.5 JAXP
     * implementations do not recognize them.
     */
    private static final class SAXParserImpl
        extends SAXParser
    {
        private final SAXParser parser;

        protected SAXParserImpl( SAXParser saxParser )
            throws SAXException
        {
            this.parser = saxParser;
            try
            {
                this.parser.setProperty(
                    XMLConstants.ACCESS_EXTERNAL_DTD,
                    Internal.EXTERNAL_ENTITIES.get() ? ACCESS_EXTERNAL_ALL : ACCESS_EXTERNAL_NONE
                );
            }
            catch( SAXException ex )
            {
                LOG.trace( "JAXP<1.5 - {} on {}", ex.getMessage(), this.parser );
            }
            try
            {
                this.parser.setProperty(
                    XMLConstants.ACCESS_EXTERNAL_SCHEMA,
                    Internal.EXTERNAL_ENTITIES.get() ? ACCESS_EXTERNAL_ALL : ACCESS_EXTERNAL_NONE
                );
            }
            catch( SAXException ex )
            {
                LOG.trace( "JAXP<1.5 - {} on {}", ex.getMessage(), this.parser );
            }
        }

        @Override
        public void reset()
        {
            parser.reset();
        }

        // All parse overloads below delegate unchanged to the wrapped parser.

        @Override
        public void parse( InputStream inputStream, HandlerBase handlerBase )
            throws SAXException, IOException
        {
            parser.parse( inputStream, handlerBase );
        }

        @Override
        public void parse( InputStream inputStream, HandlerBase handlerBase, String systemId )
            throws SAXException, IOException
        {
            parser.parse( inputStream, handlerBase, systemId );
        }

        @Override
        public void parse( InputStream inputStream, DefaultHandler defaultHandler )
            throws SAXException, IOException
        {
            parser.parse( inputStream, defaultHandler );
        }

        @Override
        public void parse( InputStream inputStream, DefaultHandler defaultHandler, String systemId )
            throws SAXException, IOException
        {
            parser.parse( inputStream, defaultHandler, systemId );
        }

        @Override
        public void parse( String s, HandlerBase handlerBase )
            throws SAXException, IOException
        {
            parser.parse( s, handlerBase );
        }

        @Override
        public void parse( String s, DefaultHandler defaultHandler )
            throws SAXException, IOException
        {
            parser.parse( s, defaultHandler );
        }

        @Override
        public void parse( File file, HandlerBase handlerBase )
            throws SAXException, IOException
        {
            parser.parse( file, handlerBase );
        }

        @Override
        public void parse( File file, DefaultHandler defaultHandler )
            throws SAXException, IOException
        {
            parser.parse( file, defaultHandler );
        }

        @Override
        public void parse( InputSource inputSource, HandlerBase handlerBase )
            throws SAXException, IOException
        {
            parser.parse( inputSource, handlerBase );
        }

        @Override
        public void parse( InputSource inputSource, DefaultHandler defaultHandler )
            throws SAXException, IOException
        {
            parser.parse( inputSource, defaultHandler );
        }

        @Override
        public Parser getParser()
            throws SAXException
        {
            return parser.getParser();
        }

        /**
         * Returns the underlying XMLReader after (re)applying the hardening:
         * secure processing, external-entity features per global setting, the
         * module's entity resolver and error handler.
         */
        @Override
        public XMLReader getXMLReader()
            throws SAXException
        {
            XMLReader reader = parser.getXMLReader();
            try
            {
                reader.setFeature( XMLConstants.FEATURE_SECURE_PROCESSING, true );
            }
            catch( SAXNotRecognizedException ex )
            {
                LOG.trace( "JAXP<1.5 - {} on {}", ex.getMessage(), reader );
            }
            reader.setFeature( SAX.Features.EXTERNAL_GENERAL_ENTITIES, Internal.EXTERNAL_ENTITIES.get() );
            reader.setFeature( SAX.Features.EXTERNAL_PARAMETER_ENTITIES, Internal.EXTERNAL_ENTITIES.get() );
            reader.setEntityResolver( Internal.RESOLVER.get() );
            reader.setErrorHandler( Errors.INSTANCE );
            return reader;
        }

        @Override
        public boolean isNamespaceAware()
        {
            return parser.isNamespaceAware();
        }

        @Override
        public boolean isValidating()
        {
            return parser.isValidating();
        }

        @Override
        public void setProperty( String name, Object value )
            throws SAXNotRecognizedException, SAXNotSupportedException
        {
            parser.setProperty( name, value );
        }

        @Override
        public Object getProperty( String name )
            throws SAXNotRecognizedException, SAXNotSupportedException
        {
            return parser.getProperty( name );
        }

        @Override
        public Schema getSchema()
        {
            return parser.getSchema();
        }

        @Override
        public boolean isXIncludeAware()
        {
            return parser.isXIncludeAware();
        }
    }
}
apache-2.0
ChuyX3/angsys
branch/old_angsys/angsys_beta1/angsys_android/include/angsys/ang/core/async.h
23500
/*********************************************************************************************************************/ /* File Name: ang/async.h */ /* Author: Ing. Jesus Rocha <[email protected]>, July 2016. */ /* File description: this file exposes threading management as well as useful objects and operations for */ /* synchronization. */ /* */ /* Copyright (C) angsys, Jesus Angel Rocha Morales */ /* You may opt to use, copy, modify, merge, publish and/or distribute copies of the Software, and permit persons */ /* to whom the Software is furnished to do so. */ /* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. */ /* */ /*********************************************************************************************************************/ #ifndef __ANG_ASYNC_H__ #define __ANG_ASYNC_H__ #include <angsys.h> #include <ang/core/delegates.h> #ifdef LINK #undef LINK #endif//LINK #if defined WINDOWS_PLATFORM #if defined ANGSYS_DYNAMIC_LIBRARY #ifdef ANGSYS_EXPORTS #define LINK __declspec(dllexport) #else #define LINK __declspec(dllimport) #endif//ANGSYS_EXPORTS #else//#elif defined ANGSYS_STATIC_LIBRARY #define LINK #endif//ANGSYS_DYNAMIC_LIBRARY #elif defined LINUX_PLATFORM || defined ANDROID_PLATFORM #define LINK #endif namespace ang { namespace core { namespace async { class mutex; class cond; class thread; class waiter; struct iasync_task; class dispatcher_thread; template<typename result_t>struct iasync; template<typename return_t>class async_task; template<typename result_t> class async_task_result; typedef object_wrapper<mutex> mutex_t; typedef object_wrapper<cond> cond_t; typedef object_wrapper<waiter> waiter_t; typedef object_wrapper<thread> thread_t; typedef intf_wrapper<iasync_task> iasync_task_t; typedef object_wrapper<dispatcher_thread> dispatcher_thread_t; template <typename result_t> using iasync_t = intf_wrapper<iasync<result_t>>; template <typename result_t> using async_task_t = 
object_wrapper<async_task<result_t>>; template<typename result_t> using async_task_result_t = object_wrapper<async_task_result<result_t>>; typedef delegates::function<dword(pointer)> thread_callback_t; typedef delegates::listener<dword(pointer)> thread_event_listener_t; /******************************************************************/ /* enum ang::core::async::thread_priority : */ /* -> represents the thread priority */ /******************************************************************/ ANG_BEGIN_ENUM(LINK, thread_priority, word) low, normal, high, ANG_END_ENUM(thread_priority); /******************************************************************/ /* enum ang::core::async::detach_state */ /* -> represents a thread capability */ /******************************************************************/ ANG_BEGIN_ENUM(LINK, detach_state, word) joinable = 0, detached = 1 ANG_END_ENUM(detach_state); /******************************************************************/ /* falg ang::core::async::async_action_status */ /* -> represents the thread status */ /******************************************************************/ ANG_BEGIN_FLAGS(LINK, async_action_status, uint) none = 0, starting = 0X01, running = 0X02, suspended = 0X04, canceled = 0X08, completed = 0X10, finished = canceled | completed, stopped = finished | starting, not_running = stopped | suspended, error = 0XFF00, ANG_END_FLAGS(async_action_status); ANG_BEGIN_INTERFACE(LINK, iasync_task) visible vcall bool wait(async_action_status_t, dword)const pure; visible vcall async_action_status_t status()const pure; visible vcall bool cancel()pure; visible vcall bool suspend()pure; visible vcall bool resume()pure; ANG_END_INTERFACE(); /******************************************************************/ /* interface ang::core::async::iasync< result_t > */ /* -> represents a asynchronous operation, it can be waited for */ /* its result or any status change */ 
/******************************************************************/ template<typename result_t> ANG_BEGIN_INLINE_INTERFACE_WITH_BASE(iasync, public iasync_task) visible vcall result_t result()const pure; visible vcall void then(delegates::function<void(iasync<result_t>*)>)pure; ANG_END_INTERFACE(); template<> ANG_BEGIN_INTERFACE_WITH_BASE(LINK, iasync<void>, public iasync_task) visible vcall void result()const pure; visible vcall void then(delegates::function<void(iasync<void>*)>)pure; ANG_END_INTERFACE(); }//async }//core }//ang //ANG_DECLARE_INTERFACE_VECTOR_SPECIALIZATION(LINK, ang::core::async::iasync_task); namespace ang { template <typename result_t> class intf_wrapper<core::async::iasync<result_t>> { public: typedef core::async::iasync<result_t> type; protected: type* _ptr; public: intf_wrapper(); intf_wrapper(type*); intf_wrapper(ang::nullptr_t const&); intf_wrapper(intf_wrapper &&); intf_wrapper(intf_wrapper const&); ~intf_wrapper(); public: void clean(); bool is_empty()const; type* get(void)const; void set(type*); type ** addres_of(void); template<class then_result_t> core::async::iasync_t<then_result_t> then(core::delegates::function<then_result_t(core::async::iasync<result_t>*)> func); inline bool wait(core::async::async_action_status_t status, dword ms = -1)const { if (_ptr)return _ptr->wait(status, ms); return false; } public: intf_wrapper& operator = (type*); intf_wrapper& operator = (ang::nullptr_t const&); intf_wrapper& operator = (intf_wrapper &&); intf_wrapper& operator = (intf_wrapper const&); intf_wrapper_ptr<type> operator & (void); type * operator -> (void); type const* operator -> (void)const; operator type * (void); operator type const* (void)const; }; /******************************************************************/ /* calss ang::core::async::mutex_t */ /* -> represents a system mutex */ /******************************************************************/ template<> class LINK object_wrapper<core::async::mutex> { public: typedef 
core::async::mutex type; protected: core::async::mutex* _ptr; public: object_wrapper(); object_wrapper(object_wrapper &&); object_wrapper(object_wrapper const&); object_wrapper(std::nullptr_t const&); object_wrapper(core::async::mutex*); ~object_wrapper(); public: void clean(); bool is_empty()const; core::async::mutex* get(void)const; void set(type*); object_wrapper& operator = (object_wrapper &&); object_wrapper& operator = (object_wrapper const&); object_wrapper& operator = (core::async::mutex*); operator core::async::mutex*(); operator core::async::mutex const*()const; core::async::mutex* operator -> ()const; }; /******************************************************************/ /* calss ang::core::async::mutex_t */ /* -> represents a system asynchronous condition or event */ /******************************************************************/ template<> class LINK object_wrapper<core::async::cond> { public: typedef core::async::cond type; protected: core::async::cond* _ptr; public: object_wrapper(); object_wrapper(object_wrapper &&); object_wrapper(object_wrapper const&); object_wrapper(std::nullptr_t const&); object_wrapper(core::async::cond*); ~object_wrapper(); public: void clean(); void clean_unsafe(); bool is_empty()const; core::async::cond* get(void)const; void set(core::async::cond*); object_wrapper& operator = (object_wrapper &&); object_wrapper& operator = (object_wrapper const&); object_wrapper& operator = (core::async::cond*); operator core::async::cond*(); operator core::async::cond const*()const; core::async::cond* operator -> ()const; }; /******************************************************************/ /* calss ang::core::async::waiter_t */ /* -> object that can wait for multiple asinc operations */ /******************************************************************/ template<> class LINK object_wrapper<core::async::waiter> { public: typedef core::async::waiter type; protected: core::async::waiter* _ptr; public: object_wrapper(); 
object_wrapper(object_wrapper &&); object_wrapper(object_wrapper const&); object_wrapper(std::nullptr_t const&); object_wrapper(core::async::waiter*); ~object_wrapper(); public: void clean(); void clean_unsafe(); bool is_empty()const; core::async::waiter* get(void)const; void set(core::async::waiter*); object_wrapper& operator = (object_wrapper &&); object_wrapper& operator = (object_wrapper const&); object_wrapper& operator = (core::async::waiter*); object_wrapper& operator += (core::async::iasync_task_t); operator core::async::waiter*(); operator core::async::waiter const*()const; core::async::waiter* operator -> ()const; }; namespace core { namespace async { /******************************************************************/ /* calss ang::core::async::mutex */ /* -> is the mutex_t internal implementation */ /******************************************************************/ class LINK mutex final : public object { private: //Non copiable mutex(const mutex&) = delete; mutex& operator = (const mutex&) = delete; protected: ang_core_mutex_ptr_t _handle; public: mutex(); mutex(bool lock); virtual~mutex(); public: //Overrides ANG_DECLARE_INTERFACE(); public: //Properties bool lock()const; bool trylock()const; bool unlock()const; friend cond; }; /******************************************************************/ /* calss ang::core::async::scope_locker */ /* -> is a useful mutex locker limited to its cycle life */ /******************************************************************/ class scope_locker final { private: mutex_t _mutex; public: inline scope_locker(mutex_t m) : _mutex(m) { if (!_mutex.is_empty()) { _mutex->lock(); } } inline ~scope_locker() { if (_mutex) { _mutex->unlock(); _mutex = null; } } template<typename func_t> static auto lock(mutex_t m, func_t func) -> decltype(func()) { scope_locker _lock = m; return func(); } }; /******************************************************************/ /* calss ang::core::async::cond */ /* -> is the cond_t internal 
implementation */ /******************************************************************/ class LINK cond final : public object { private: //Non copiable cond(const cond&); cond& operator = (const cond&); protected: ang_core_cond_ptr_t _handle; public: cond(); virtual~cond(); public: //Overrides ANG_DECLARE_INTERFACE(); public: //Properties bool wait(mutex_t mutex)const; bool wait(mutex_t mutex, dword ms)const; bool signal()const; template<typename func_t> bool waitfor(mutex_t mutex, dword ms, func_t f) { if (f()) { wait(mutex, ms); } return !f(); } template<typename func_t> void waitfor(mutex_t mutex, func_t f) { while (f()) { wait(mutex); } } }; /******************************************************************/ /* calss ang::core::async::waiter */ /* -> is the waiter_t internal implementation */ /******************************************************************/ class LINK waiter final : public object { private: //Non copiable waiter(const waiter&); waiter& operator = (const waiter&); protected: collections::vector<iasync_task_t> _handle; public: waiter(); virtual~waiter(); public: //Overrides ANG_DECLARE_INTERFACE(); public: //Properties bool add(iasync_task_t); bool wait_all(async_action_status_t, dword ms)const; void clean(); }; }//async }//core }//ang namespace ang { /******************************************************************/ /* calss ang::core::async::thread_t */ /* -> represents a system worker thread */ /******************************************************************/ template<> class LINK object_wrapper<core::async::thread> { public: typedef core::async::thread type; protected: type* _ptr; public: object_wrapper(); object_wrapper(object_wrapper &&); object_wrapper(object_wrapper const&); object_wrapper(std::nullptr_t const&); object_wrapper(type*); ~object_wrapper(); public: void clean(); void clean_unsafe(); bool is_empty()const; type* get(void)const; void set(type*); object_wrapper& operator = (object_wrapper &&); object_wrapper& operator = 
(object_wrapper const&); object_wrapper& operator = (type*); operator type*(); operator type const*()const; type* operator -> (); type const* operator -> ()const; }; namespace core { namespace async { /******************************************************************/ /* calss ang::core::async::thread */ /* -> is the thread_t internal implementation */ /******************************************************************/ class LINK thread : public object , public iasync<dword> { public: static void sleep(dword ms); static thread_t current_thread(); static dword current_thread_id(); static thread_t main_thread(); template<typename calleable_t, typename... args_t> static thread_t create_worker_thread(calleable_t callback, args_t... args) { thread_t worker = new thread(); if(!worker->start([=](pointer)->dword { callback(args...); return 0U; }, null)) return null; worker->wait(async_action_status::running, -1); return worker; } protected: ang_core_thread_ptr_t _handle; public: thread(); public: //Overrides ANG_DECLARE_INTERFACE(); virtual bool start(thread_callback_t callback, pointer args , thread_priority_t priority = thread_priority::normal , detach_state_t ds = detach_state::joinable); pointer handle()const; bool is_created()const; public: virtual bool wait(async_action_status_t, dword)const override; virtual async_action_status_t status()const override; virtual bool cancel()override; virtual bool suspend()override; virtual bool resume()override; virtual dword result()const override; virtual void then(delegates::function<void(iasync<dword>*)>)override; bool is_main_thread()const; bool is_current_thread()const; void join(); dword id()const; private: //Properties void complete(dword); void dettach(); protected: virtual~thread(); }; }//async }//core }//ang namespace ang { /******************************************************************/ /* calss ang::core::async::thread_t */ /* -> represents a system worker thread */ 
/******************************************************************/ template<> class LINK object_wrapper<core::async::dispatcher_thread> { public: typedef core::async::dispatcher_thread type; protected: type* _ptr; public: object_wrapper(); object_wrapper(object_wrapper &&); object_wrapper(object_wrapper const&); object_wrapper(std::nullptr_t const&); object_wrapper(type*); ~object_wrapper(); public: void clean(); void clean_unsafe(); bool is_empty()const; type* get(void)const; void set(type*); object_wrapper& operator = (object_wrapper &&); object_wrapper& operator = (object_wrapper const&); object_wrapper& operator = (type*); operator type*(); operator type const*()const; type* operator -> (); type const* operator -> ()const; }; template<> class LINK object_wrapper<core::delegates::function_data<void(core::async::iasync<void>*)>> { public: typedef core::delegates::function_data<void(core::async::iasync<void>*)> type; protected: type* _ptr; public: object_wrapper(); object_wrapper(object_wrapper &&); object_wrapper(object_wrapper const&); object_wrapper(std::nullptr_t const&); ~object_wrapper(); template<typename calleable_t> object_wrapper(calleable_t const& func) : object_wrapper() { set(new core::delegates::static_function<calleable_t, void, core::async::iasync<void>*>(func)); } template<typename obj_t> object_wrapper(obj_t* obj, void(obj_t::*f)(void)) : object_wrapper() { set(new core::delegates::member_function<obj_t, void, core::async::iasync<void>*>(obj, f)); } public: void clean(); void clean_unsafe(); bool is_empty()const; type* get(void)const; void set(type*); type ** addres_of(void); void invoke(core::async::iasync<void>* args)const { if (is_empty()) return; return get()->invoke(args); } public: object_wrapper& operator = (object_wrapper &&); object_wrapper& operator = (object_wrapper const&); object_wrapper& operator = (std::nullptr_t const&) { clean(); return*this; } operator object_t()const { return _ptr; } template<typename calleable_t> 
object_wrapper& operator = (calleable_t func) { set(new core::delegates::static_function<calleable_t, void, core::async::iasync<void>*>(func)); return*this; } object_wrapper_ptr<type> operator & (void); void operator()(core::async::iasync<void>* args)const { invoke(args); } friend safe_pointer; }; namespace core { namespace async { template<typename T> class async_task_result final : public object , public iasync<T> { private: T _result; cond_t _cond; mutex_t _mutex; bool _was_canceled; bool _handled; thread_t _thread; async_action_status_t _status; delegates::function<void(iasync<T>*)> _then; public: inline async_task_result(); inline async_task_result(thread_t, mutex_t, cond_t); private: inline virtual~async_task_result(); async_task_result(const async_task_result<T>&) = delete; async_task_result& operator =(const async_task_result<T>&) = delete; public: ANG_DECLARE_INTERFACE(); inline void complete(); inline void complete(const T&); inline bool wait(async_action_status_t, dword)const override; inline async_action_status_t status()const override; inline bool cancel() override; inline bool suspend() override; inline bool resume() override; inline T result()const override; inline void then(delegates::function<void(iasync<T>*)>)override; friend async_task<T>; friend dispatcher_thread; }; template<typename T> class async_task final { public: static iasync_t<T> run_async(delegates::function < T(iasync<T>*, var_args_t)> , var_args_t = null, thread_priority_t = thread_priority::normal); static iasync_t<T> run_async(async_task_result<T>*, delegates::function < T(iasync<T>*, var_args_t)> , var_args_t = null, thread_priority_t = thread_priority::normal); static iasync_t<T> run_sync(async_task_result<T>*, delegates::function < T(iasync<T>*, var_args_t)> , var_args_t = null); //private: // iasync<T>* _task; //public: }; template<> class LINK async_task_result<void> final : public object , public iasync<void> { private: cond_t _cond; mutex_t _mutex; bool _was_canceled; bool 
_handled; thread_t _thread; async_action_status_t _status; delegates::function<void(iasync<void>*)> _then; public: inline async_task_result(); private: inline virtual~async_task_result(); async_task_result(const async_task_result<void>&) = delete; async_task_result& operator =(const async_task_result<void>&) = delete; public: ANG_DECLARE_INTERFACE(); inline void complete(); inline bool wait(async_action_status_t, dword)const override; inline async_action_status_t status()const override; inline bool cancel() override; inline bool suspend() override; inline bool resume() override; inline void result()const override; inline void then(delegates::function<void(iasync<void>*)>)override; friend async_task<void>; }; template<> class LINK async_task<void> final { public: static iasync_t<void> run_async(delegates::function<void(iasync<void>*, var_args_t)> , var_args_t = null, thread_priority_t = thread_priority::normal); static iasync_t<void> run_async(async_task_result<void>*, delegates::function<void(iasync<void>*, var_args_t)> , var_args_t = null, thread_priority_t = thread_priority::normal); }; class LINK dispatcher_thread final : public thread { private: cond_t _cond; mutex_t _mutex; collections::vector<thread_callback_t> _tasks; collections::vector<thread_callback_t> _tasks_backup; public: dispatcher_thread(); public: //Overrides ANG_DECLARE_INTERFACE(); public: bool dispatch(); bool start(thread_callback_t , pointer, thread_priority_t = thread_priority::normal); delegates::listener<void(objptr, pointer)> start_event; delegates::listener<void(objptr, pointer)> end_event; private: virtual bool start(thread_callback_t, pointer , thread_priority_t, detach_state_t) override; public: virtual bool wait(async_action_status_t, dword)const override; virtual async_action_status_t status()const override; virtual bool cancel()override; virtual bool suspend()override; virtual bool resume()override; public: template<class T> iasync_t<T> run_async(delegates::function<T(iasync<T>*, 
var_args_t)> callback, var_args_t args); template<class T, class... Ts> iasync_t<T> run_async(delegates::function<T(iasync<T>*, var_args_t)> callback, Ts const&...); //private: bool post_task(thread_callback_t callback); private: virtual~dispatcher_thread(); }; }//async }//core }//ang #ifdef LINK #undef LINK #endif #include<ang/core/inlines/async.inl> #endif//__ANG_ASYNC_H__
apache-2.0
mdoering/backbone
life/Plantae/Magnoliophyta/Magnoliopsida/Fabales/Fabaceae/Mimosa/Mimosa mangium/README.md
173
# Mimosa mangium G.Forst. SPECIES #### Status ACCEPTED #### According to International Plant Names Index #### Published in null #### Original name null ### Remarks null
apache-2.0
mdoering/backbone
life/Fungi/Basidiomycota/Pucciniomycetes/Pucciniales/Chaconiaceae/Olivea/Olivea petitiae/README.md
231
# Olivea petitiae Arthur, 1917 SPECIES #### Status ACCEPTED #### According to The Catalogue of Life, 3rd January 2011 #### Published in Mycologia 9(2): 62 (1917) #### Original name Olivea petitiae Arthur, 1917 ### Remarks null
apache-2.0
mdoering/backbone
life/Plantae/Magnoliophyta/Magnoliopsida/Lamiales/Gesneriaceae/Sinningia/Sinningia canescens/ Syn. Corytholoma canescens/README.md
194
# Corytholoma canescens (Mart.) Fritsch SPECIES #### Status SYNONYM #### According to The Catalogue of Life, 3rd January 2011 #### Published in null #### Original name null ### Remarks null
apache-2.0
mdoering/backbone
life/Plantae/Bryophyta/Bryopsida/Pottiales/Calymperaceae/Leucophanes/Leucophanes octoblepharioides/README.md
200
# Leucophanes octoblepharioides Bridel, 1827 SPECIES #### Status ACCEPTED #### According to The Catalogue of Life, 3rd January 2011 #### Published in null #### Original name null ### Remarks null
apache-2.0
mdoering/backbone
life/Fungi/Ascomycota/Pezizomycetes/Pezizales/Pyronemataceae/Octospora/Octospora thuemenii/ Syn. Octospora thumenii/README.md
209
# Octospora thumenii (P. Karst.) K.B. Khare & V.P. Tewari SPECIES #### Status SYNONYM #### According to Index Fungorum #### Published in null #### Original name Peziza thuemenii P. Karst. ### Remarks null
apache-2.0
marcoveeneman/ChibiOS-Tiva
os/hal/ports/SPC5/SPC5xx/EDMA_v1/spc5_edma.c
25810
/* SPC5 HAL - Copyright (C) 2013 STMicroelectronics Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ /** * @file SPC5xx/spc5_edma.c * @brief EDMA helper driver code. * * @addtogroup SPC5xx_EDMA * @{ */ #include "hal.h" #if SPC5_HAS_EDMA /*===========================================================================*/ /* Driver local definitions. */ /*===========================================================================*/ static const uint8_t g0[16] = {SPC5_EDMA_GROUP0_PRIORITIES}; #if (SPC5_EDMA_NCHANNELS > 16) || defined(__DOXYGEN__) static const uint8_t g1[16] = {SPC5_EDMA_GROUP1_PRIORITIES}; #endif #if (SPC5_EDMA_NCHANNELS > 32) || defined(__DOXYGEN__) static const uint8_t g2[16] = {SPC5_EDMA_GROUP2_PRIORITIES}; static const uint8_t g3[16] = {SPC5_EDMA_GROUP3_PRIORITIES}; #endif /*===========================================================================*/ /* Driver exported variables. */ /*===========================================================================*/ /*===========================================================================*/ /* Driver local variables and types. */ /*===========================================================================*/ /** * @brief Configurations for the various EDMA channels. */ static const edma_channel_config_t *channels[SPC5_EDMA_NCHANNELS]; /*===========================================================================*/ /* Driver local functions. 
*/ /*===========================================================================*/ /*===========================================================================*/ /* Driver interrupt handlers. */ /*===========================================================================*/ /** * @brief EDMA (channels 0..31) error interrupt. * * @isr */ OSAL_IRQ_HANDLER(vector10) { edma_channel_t channel; uint32_t erl, esr = SPC5_EDMA.ESR.R; OSAL_IRQ_PROLOGUE(); /* Scanning for errors.*/ channel = 0; while (((erl = SPC5_EDMA.ERL.R) != 0) && (channel < (SPC5_EDMA_NCHANNELS > 32 ? 32 : SPC5_EDMA_NCHANNELS))) { if ((erl & (1U << channel)) != 0) { /* Error flag cleared.*/ SPC5_EDMA.CER.R = channel; /* If the channel is not associated then the error is simply discarded else the error callback is invoked.*/ if ((channels[channel] != NULL) && (channels[channel]->dma_error_func != NULL)) channels[channel]->dma_error_func(channel, channels[channel]->dma_param, esr); } channel++; } OSAL_IRQ_EPILOGUE(); } /** * @brief EDMA channel 0 interrupt. * * @isr */ OSAL_IRQ_HANDLER(vector11) { OSAL_IRQ_PROLOGUE(); if (channels[0] == NULL) { SPC5_EDMA_ERROR_HANDLER(); } SPC5_EDMA.CIRQR.R = 0; channels[0]->dma_func(0, channels[0]->dma_param); OSAL_IRQ_EPILOGUE(); } /** * @brief EDMA channel 1 interrupt. * * @isr */ OSAL_IRQ_HANDLER(vector12) { OSAL_IRQ_PROLOGUE(); if (channels[1] == NULL) { SPC5_EDMA_ERROR_HANDLER(); } SPC5_EDMA.CIRQR.R = 1; channels[1]->dma_func(1, channels[1]->dma_param); OSAL_IRQ_EPILOGUE(); } /** * @brief EDMA channel 2 interrupt. * * @isr */ OSAL_IRQ_HANDLER(vector13) { OSAL_IRQ_PROLOGUE(); if (channels[2] == NULL) { SPC5_EDMA_ERROR_HANDLER(); } SPC5_EDMA.CIRQR.R = 2; channels[2]->dma_func(2, channels[2]->dma_param); OSAL_IRQ_EPILOGUE(); } /** * @brief EDMA channel 3 interrupt. 
* * @isr */ OSAL_IRQ_HANDLER(vector14) { OSAL_IRQ_PROLOGUE(); if (channels[3] == NULL) { SPC5_EDMA_ERROR_HANDLER(); } SPC5_EDMA.CIRQR.R = 3; channels[3]->dma_func(3, channels[3]->dma_param); OSAL_IRQ_EPILOGUE(); } /** * @brief EDMA channel 4 interrupt. * * @isr */ OSAL_IRQ_HANDLER(vector15) { OSAL_IRQ_PROLOGUE(); if (channels[4] == NULL) { SPC5_EDMA_ERROR_HANDLER(); } SPC5_EDMA.CIRQR.R = 4; channels[4]->dma_func(4, channels[4]->dma_param); OSAL_IRQ_EPILOGUE(); } /** * @brief EDMA channel 5 interrupt. * * @isr */ OSAL_IRQ_HANDLER(vector16) { OSAL_IRQ_PROLOGUE(); if (channels[5] == NULL) { SPC5_EDMA_ERROR_HANDLER(); } SPC5_EDMA.CIRQR.R = 5; channels[5]->dma_func(5, channels[5]->dma_param); OSAL_IRQ_EPILOGUE(); } /** * @brief EDMA channel 6 interrupt. * * @isr */ OSAL_IRQ_HANDLER(vector17) { OSAL_IRQ_PROLOGUE(); if (channels[6] == NULL) { SPC5_EDMA_ERROR_HANDLER(); } SPC5_EDMA.CIRQR.R = 6; channels[6]->dma_func(6, channels[6]->dma_param); OSAL_IRQ_EPILOGUE(); } /** * @brief EDMA channel 7 interrupt. * * @isr */ OSAL_IRQ_HANDLER(vector18) { OSAL_IRQ_PROLOGUE(); if (channels[7] == NULL) { SPC5_EDMA_ERROR_HANDLER(); } SPC5_EDMA.CIRQR.R = 7; channels[7]->dma_func(7, channels[7]->dma_param); OSAL_IRQ_EPILOGUE(); } /** * @brief EDMA channel 8 interrupt. * * @isr */ OSAL_IRQ_HANDLER(vector19) { OSAL_IRQ_PROLOGUE(); if (channels[8] == NULL) { SPC5_EDMA_ERROR_HANDLER(); } SPC5_EDMA.CIRQR.R = 8; channels[8]->dma_func(8, channels[8]->dma_param); OSAL_IRQ_EPILOGUE(); } /** * @brief EDMA channel 9 interrupt. * * @isr */ OSAL_IRQ_HANDLER(vector20) { OSAL_IRQ_PROLOGUE(); if (channels[9] == NULL) { SPC5_EDMA_ERROR_HANDLER(); } SPC5_EDMA.CIRQR.R = 9; channels[9]->dma_func(9, channels[9]->dma_param); OSAL_IRQ_EPILOGUE(); } /** * @brief EDMA channel 10 interrupt. 
* * @isr */ OSAL_IRQ_HANDLER(vector21) { OSAL_IRQ_PROLOGUE(); if (channels[10] == NULL) { SPC5_EDMA_ERROR_HANDLER(); } SPC5_EDMA.CIRQR.R = 10; channels[10]->dma_func(10, channels[10]->dma_param); OSAL_IRQ_EPILOGUE(); } /** * @brief EDMA channel 11 interrupt. * * @isr */ OSAL_IRQ_HANDLER(vector22) { OSAL_IRQ_PROLOGUE(); if (channels[11] == NULL) { SPC5_EDMA_ERROR_HANDLER(); } SPC5_EDMA.CIRQR.R = 11; channels[11]->dma_func(11, channels[11]->dma_param); OSAL_IRQ_EPILOGUE(); } /** * @brief EDMA channel 12 interrupt. * * @isr */ OSAL_IRQ_HANDLER(vector23) { OSAL_IRQ_PROLOGUE(); if (channels[12] == NULL) { SPC5_EDMA_ERROR_HANDLER(); } SPC5_EDMA.CIRQR.R = 12; channels[12]->dma_func(12, channels[12]->dma_param); OSAL_IRQ_EPILOGUE(); } /** * @brief EDMA channel 13 interrupt. * * @isr */ OSAL_IRQ_HANDLER(vector24) { OSAL_IRQ_PROLOGUE(); if (channels[13] == NULL) { SPC5_EDMA_ERROR_HANDLER(); } SPC5_EDMA.CIRQR.R = 13; channels[13]->dma_func(13, channels[13]->dma_param); OSAL_IRQ_EPILOGUE(); } /** * @brief EDMA channel 14 interrupt. * * @isr */ OSAL_IRQ_HANDLER(vector25) { OSAL_IRQ_PROLOGUE(); if (channels[14] == NULL) { SPC5_EDMA_ERROR_HANDLER(); } SPC5_EDMA.CIRQR.R = 14; channels[14]->dma_func(14, channels[14]->dma_param); OSAL_IRQ_EPILOGUE(); } /** * @brief EDMA channel 15 interrupt. * * @isr */ OSAL_IRQ_HANDLER(vector26) { OSAL_IRQ_PROLOGUE(); if (channels[15] == NULL) { SPC5_EDMA_ERROR_HANDLER(); } SPC5_EDMA.CIRQR.R = 15; channels[15]->dma_func(15, channels[15]->dma_param); OSAL_IRQ_EPILOGUE(); } #if (SPC5_EDMA_NCHANNELS > 16) || defined(__DOXYGEN__) /** * @brief EDMA channel 16 interrupt. * * @isr */ OSAL_IRQ_HANDLER(vector27) { OSAL_IRQ_PROLOGUE(); if (channels[16] == NULL) { SPC5_EDMA_ERROR_HANDLER(); } SPC5_EDMA.CIRQR.R = 16; channels[16]->dma_func(16, channels[16]->dma_param); OSAL_IRQ_EPILOGUE(); } /** * @brief EDMA channel 17 interrupt. 
* * @isr */ OSAL_IRQ_HANDLER(vector28) { OSAL_IRQ_PROLOGUE(); if (channels[17] == NULL) { SPC5_EDMA_ERROR_HANDLER(); } SPC5_EDMA.CIRQR.R = 17; channels[17]->dma_func(17, channels[17]->dma_param); OSAL_IRQ_EPILOGUE(); } /** * @brief EDMA channel 18 interrupt. * * @isr */ OSAL_IRQ_HANDLER(vector29) { OSAL_IRQ_PROLOGUE(); if (channels[18] == NULL) { SPC5_EDMA_ERROR_HANDLER(); } SPC5_EDMA.CIRQR.R = 18; channels[18]->dma_func(18, channels[18]->dma_param); OSAL_IRQ_EPILOGUE(); } /** * @brief EDMA channel 19 interrupt. * * @isr */ OSAL_IRQ_HANDLER(vector30) { OSAL_IRQ_PROLOGUE(); if (channels[19] == NULL) { SPC5_EDMA_ERROR_HANDLER(); } SPC5_EDMA.CIRQR.R = 19; channels[19]->dma_func(19, channels[19]->dma_param); OSAL_IRQ_EPILOGUE(); } /** * @brief EDMA channel 20 interrupt. * * @isr */ OSAL_IRQ_HANDLER(vector31) { OSAL_IRQ_PROLOGUE(); if (channels[20] == NULL) { SPC5_EDMA_ERROR_HANDLER(); } SPC5_EDMA.CIRQR.R = 20; channels[20]->dma_func(20, channels[20]->dma_param); OSAL_IRQ_EPILOGUE(); } /** * @brief EDMA channel 21 interrupt. * * @isr */ OSAL_IRQ_HANDLER(vector32) { OSAL_IRQ_PROLOGUE(); if (channels[21] == NULL) { SPC5_EDMA_ERROR_HANDLER(); } SPC5_EDMA.CIRQR.R = 21; channels[21]->dma_func(21, channels[21]->dma_param); OSAL_IRQ_EPILOGUE(); } /** * @brief EDMA channel 22 interrupt. * * @isr */ OSAL_IRQ_HANDLER(vector33) { OSAL_IRQ_PROLOGUE(); if (channels[22] == NULL) { SPC5_EDMA_ERROR_HANDLER(); } SPC5_EDMA.CIRQR.R = 22; channels[22]->dma_func(22, channels[22]->dma_param); OSAL_IRQ_EPILOGUE(); } /** * @brief EDMA channel 23 interrupt. * * @isr */ OSAL_IRQ_HANDLER(vector34) { OSAL_IRQ_PROLOGUE(); if (channels[23] == NULL) { SPC5_EDMA_ERROR_HANDLER(); } SPC5_EDMA.CIRQR.R = 23; channels[23]->dma_func(23, channels[23]->dma_param); OSAL_IRQ_EPILOGUE(); } /** * @brief EDMA channel 24 interrupt. 
* * @isr */ OSAL_IRQ_HANDLER(vector35) { OSAL_IRQ_PROLOGUE(); if (channels[24] == NULL) { SPC5_EDMA_ERROR_HANDLER(); } SPC5_EDMA.CIRQR.R = 24; channels[24]->dma_func(24, channels[24]->dma_param); OSAL_IRQ_EPILOGUE(); } /** * @brief EDMA channel 25 interrupt. * * @isr */ OSAL_IRQ_HANDLER(vector36) { OSAL_IRQ_PROLOGUE(); if (channels[25] == NULL) { SPC5_EDMA_ERROR_HANDLER(); } SPC5_EDMA.CIRQR.R = 25; channels[25]->dma_func(25, channels[25]->dma_param); OSAL_IRQ_EPILOGUE(); } /** * @brief EDMA channel 26 interrupt. * * @isr */ OSAL_IRQ_HANDLER(vector37) { OSAL_IRQ_PROLOGUE(); if (channels[26] == NULL) { SPC5_EDMA_ERROR_HANDLER(); } SPC5_EDMA.CIRQR.R = 26; channels[26]->dma_func(26, channels[26]->dma_param); OSAL_IRQ_EPILOGUE(); } /** * @brief EDMA channel 27 interrupt. * * @isr */ OSAL_IRQ_HANDLER(vector38) { OSAL_IRQ_PROLOGUE(); if (channels[27] == NULL) { SPC5_EDMA_ERROR_HANDLER(); } SPC5_EDMA.CIRQR.R = 27; channels[27]->dma_func(27, channels[27]->dma_param); OSAL_IRQ_EPILOGUE(); } /** * @brief EDMA channel 28 interrupt. * * @isr */ OSAL_IRQ_HANDLER(vector39) { OSAL_IRQ_PROLOGUE(); if (channels[28] == NULL) { SPC5_EDMA_ERROR_HANDLER(); } SPC5_EDMA.CIRQR.R = 28; channels[28]->dma_func(28, channels[28]->dma_param); OSAL_IRQ_EPILOGUE(); } /** * @brief EDMA channel 29 interrupt. * * @isr */ OSAL_IRQ_HANDLER(vector40) { OSAL_IRQ_PROLOGUE(); if (channels[29] == NULL) { SPC5_EDMA_ERROR_HANDLER(); } SPC5_EDMA.CIRQR.R = 29; channels[29]->dma_func(29, channels[29]->dma_param); OSAL_IRQ_EPILOGUE(); } /** * @brief EDMA channel 30 interrupt. * * @isr */ OSAL_IRQ_HANDLER(vector41) { OSAL_IRQ_PROLOGUE(); if (channels[30] == NULL) { SPC5_EDMA_ERROR_HANDLER(); } SPC5_EDMA.CIRQR.R = 30; channels[30]->dma_func(30, channels[30]->dma_param); OSAL_IRQ_EPILOGUE(); } /** * @brief EDMA channel 31 interrupt. 
* * @isr */ OSAL_IRQ_HANDLER(vector42) { OSAL_IRQ_PROLOGUE(); if (channels[31] == NULL) { SPC5_EDMA_ERROR_HANDLER(); } SPC5_EDMA.CIRQR.R = 31; channels[31]->dma_func(31, channels[31]->dma_param); OSAL_IRQ_EPILOGUE(); } #if (SPC5_EDMA_NCHANNELS > 32) || defined(__DOXYGEN__) /** * @brief EDMA (channels 32..64) error interrupt. * * @isr */ OSAL_IRQ_HANDLER(vector210) { edma_channel_t channel; uint32_t erh, esr = SPC5_EDMA.ESR.R; OSAL_IRQ_PROLOGUE(); /* Scanning for errors.*/ channel = 32; while (((erh = SPC5_EDMA.ERH.R) != 0) && (channel < SPC5_EDMA_NCHANNELS)) { if ((erh & (1U << (channel - 32))) != 0) { /* Error flag cleared.*/ SPC5_EDMA.CER.R = channel; /* If the channel is not associated then the error is simply discarded else the error callback is invoked.*/ if ((channels[channel] != NULL) && (channels[channel]->dma_error_func != NULL)) channels[channel]->dma_error_func(channel, channels[channel]->dma_param, esr); channel++; } } OSAL_IRQ_EPILOGUE(); } /** * @brief EDMA channel 32 interrupt. * * @isr */ OSAL_IRQ_HANDLER(vector211) { OSAL_IRQ_PROLOGUE(); if (channels[32] == NULL) { SPC5_EDMA_ERROR_HANDLER(); } SPC5_EDMA.CIRQR.R = 32; channels[32]->dma_func(32, channels[32]->dma_param); OSAL_IRQ_EPILOGUE(); } /** * @brief EDMA channel 33 interrupt. * * @isr */ OSAL_IRQ_HANDLER(vector212) { OSAL_IRQ_PROLOGUE(); if (channels[33] == NULL) { SPC5_EDMA_ERROR_HANDLER(); } SPC5_EDMA.CIRQR.R = 33; channels[33]->dma_func(33, channels[33]->dma_param); OSAL_IRQ_EPILOGUE(); } /** * @brief EDMA channel 34 interrupt. * * @isr */ OSAL_IRQ_HANDLER(vector213) { OSAL_IRQ_PROLOGUE(); if (channels[34] == NULL) { SPC5_EDMA_ERROR_HANDLER(); } SPC5_EDMA.CIRQR.R = 34; channels[34]->dma_func(34, channels[34]->dma_param); OSAL_IRQ_EPILOGUE(); } /** * @brief EDMA channel 35 interrupt. 
* * @isr */ OSAL_IRQ_HANDLER(vector214) { OSAL_IRQ_PROLOGUE(); if (channels[35] == NULL) { SPC5_EDMA_ERROR_HANDLER(); } SPC5_EDMA.CIRQR.R = 35; channels[35]->dma_func(35, channels[35]->dma_param); OSAL_IRQ_EPILOGUE(); } /** * @brief EDMA channel 36 interrupt. * * @isr */ OSAL_IRQ_HANDLER(vector215) { OSAL_IRQ_PROLOGUE(); if (channels[36] == NULL) { SPC5_EDMA_ERROR_HANDLER(); } SPC5_EDMA.CIRQR.R = 36; channels[36]->dma_func(36, channels[36]->dma_param); OSAL_IRQ_EPILOGUE(); } /** * @brief EDMA channel 37 interrupt. * * @isr */ OSAL_IRQ_HANDLER(vector216) { OSAL_IRQ_PROLOGUE(); if (channels[37] == NULL) { SPC5_EDMA_ERROR_HANDLER(); } SPC5_EDMA.CIRQR.R = 37; channels[37]->dma_func(37, channels[37]->dma_param); OSAL_IRQ_EPILOGUE(); } /** * @brief EDMA channel 38 interrupt. * * @isr */ OSAL_IRQ_HANDLER(vector217) { OSAL_IRQ_PROLOGUE(); if (channels[38] == NULL) { SPC5_EDMA_ERROR_HANDLER(); } SPC5_EDMA.CIRQR.R = 38; channels[38]->dma_func(38, channels[38]->dma_param); OSAL_IRQ_EPILOGUE(); } /** * @brief EDMA channel 39 interrupt. * * @isr */ OSAL_IRQ_HANDLER(vector218) { OSAL_IRQ_PROLOGUE(); if (channels[39] == NULL) { SPC5_EDMA_ERROR_HANDLER(); } SPC5_EDMA.CIRQR.R = 39; channels[39]->dma_func(39, channels[39]->dma_param); OSAL_IRQ_EPILOGUE(); } /** * @brief EDMA channel 40 interrupt. * * @isr */ OSAL_IRQ_HANDLER(vector219) { OSAL_IRQ_PROLOGUE(); if (channels[40] == NULL) { SPC5_EDMA_ERROR_HANDLER(); } SPC5_EDMA.CIRQR.R = 40; channels[40]->dma_func(40, channels[40]->dma_param); OSAL_IRQ_EPILOGUE(); } /** * @brief EDMA channel 41 interrupt. * * @isr */ OSAL_IRQ_HANDLER(vector220) { OSAL_IRQ_PROLOGUE(); if (channels[41] == NULL) { SPC5_EDMA_ERROR_HANDLER(); } SPC5_EDMA.CIRQR.R = 41; channels[41]->dma_func(41, channels[41]->dma_param); OSAL_IRQ_EPILOGUE(); } /** * @brief EDMA channel 42 interrupt. 
* * @isr */ OSAL_IRQ_HANDLER(vector221) { OSAL_IRQ_PROLOGUE(); if (channels[42] == NULL) { SPC5_EDMA_ERROR_HANDLER(); } SPC5_EDMA.CIRQR.R = 42; channels[42]->dma_func(42, channels[42]->dma_param); OSAL_IRQ_EPILOGUE(); } /** * @brief EDMA channel 43 interrupt. * * @isr */ OSAL_IRQ_HANDLER(vector222) { OSAL_IRQ_PROLOGUE(); if (channels[43] == NULL) { SPC5_EDMA_ERROR_HANDLER(); } SPC5_EDMA.CIRQR.R = 43; channels[43]->dma_func(43, channels[43]->dma_param); OSAL_IRQ_EPILOGUE(); } /** * @brief EDMA channel 44 interrupt. * * @isr */ OSAL_IRQ_HANDLER(vector223) { OSAL_IRQ_PROLOGUE(); if (channels[44] == NULL) { SPC5_EDMA_ERROR_HANDLER(); } SPC5_EDMA.CIRQR.R = 44; channels[44]->dma_func(44, channels[44]->dma_param); OSAL_IRQ_EPILOGUE(); } /** * @brief EDMA channel 45 interrupt. * * @isr */ OSAL_IRQ_HANDLER(vector224) { OSAL_IRQ_PROLOGUE(); if (channels[45] == NULL) { SPC5_EDMA_ERROR_HANDLER(); } SPC5_EDMA.CIRQR.R = 45; channels[45]->dma_func(45, channels[45]->dma_param); OSAL_IRQ_EPILOGUE(); } /** * @brief EDMA channel 46 interrupt. * * @isr */ OSAL_IRQ_HANDLER(vector225) { OSAL_IRQ_PROLOGUE(); if (channels[46] == NULL) { SPC5_EDMA_ERROR_HANDLER(); } SPC5_EDMA.CIRQR.R = 46; channels[46]->dma_func(46, channels[46]->dma_param); OSAL_IRQ_EPILOGUE(); } /** * @brief EDMA channel 47 interrupt. * * @isr */ OSAL_IRQ_HANDLER(vector226) { OSAL_IRQ_PROLOGUE(); if (channels[47] == NULL) { SPC5_EDMA_ERROR_HANDLER(); } SPC5_EDMA.CIRQR.R = 47; channels[47]->dma_func(47, channels[47]->dma_param); OSAL_IRQ_EPILOGUE(); } /** * @brief EDMA channel 48 interrupt. * * @isr */ OSAL_IRQ_HANDLER(vector227) { OSAL_IRQ_PROLOGUE(); if (channels[48] == NULL) { SPC5_EDMA_ERROR_HANDLER(); } SPC5_EDMA.CIRQR.R = 48; channels[48]->dma_func(48, channels[48]->dma_param); OSAL_IRQ_EPILOGUE(); } /** * @brief EDMA channel 49 interrupt. 
* * @isr */ OSAL_IRQ_HANDLER(vector228) { OSAL_IRQ_PROLOGUE(); if (channels[49] == NULL) { SPC5_EDMA_ERROR_HANDLER(); } SPC5_EDMA.CIRQR.R = 49; channels[49]->dma_func(49, channels[49]->dma_param); OSAL_IRQ_EPILOGUE(); } /** * @brief EDMA channel 50 interrupt. * * @isr */ OSAL_IRQ_HANDLER(vector229) { OSAL_IRQ_PROLOGUE(); if (channels[50] == NULL) { SPC5_EDMA_ERROR_HANDLER(); } SPC5_EDMA.CIRQR.R = 50; channels[50]->dma_func(50, channels[50]->dma_param); OSAL_IRQ_EPILOGUE(); } /** * @brief EDMA channel 51 interrupt. * * @isr */ OSAL_IRQ_HANDLER(vector230) { OSAL_IRQ_PROLOGUE(); if (channels[51] == NULL) { SPC5_EDMA_ERROR_HANDLER(); } SPC5_EDMA.CIRQR.R = 51; channels[51]->dma_func(51, channels[51]->dma_param); OSAL_IRQ_EPILOGUE(); } /** * @brief EDMA channel 52 interrupt. * * @isr */ OSAL_IRQ_HANDLER(vector231) { OSAL_IRQ_PROLOGUE(); if (channels[52] == NULL) { SPC5_EDMA_ERROR_HANDLER(); } SPC5_EDMA.CIRQR.R = 52; channels[52]->dma_func(52, channels[52]->dma_param); OSAL_IRQ_EPILOGUE(); } /** * @brief EDMA channel 53 interrupt. * * @isr */ OSAL_IRQ_HANDLER(vector232) { OSAL_IRQ_PROLOGUE(); if (channels[53] == NULL) { SPC5_EDMA_ERROR_HANDLER(); } SPC5_EDMA.CIRQR.R = 53; channels[53]->dma_func(53, channels[53]->dma_param); OSAL_IRQ_EPILOGUE(); } /** * @brief EDMA channel 54 interrupt. * * @isr */ OSAL_IRQ_HANDLER(vector233) { OSAL_IRQ_PROLOGUE(); if (channels[54] == NULL) { SPC5_EDMA_ERROR_HANDLER(); } SPC5_EDMA.CIRQR.R = 54; channels[54]->dma_func(54, channels[54]->dma_param); OSAL_IRQ_EPILOGUE(); } /** * @brief EDMA channel 55 interrupt. * * @isr */ OSAL_IRQ_HANDLER(vector234) { OSAL_IRQ_PROLOGUE(); if (channels[55] == NULL) { SPC5_EDMA_ERROR_HANDLER(); } SPC5_EDMA.CIRQR.R = 55; channels[55]->dma_func(55, channels[55]->dma_param); OSAL_IRQ_EPILOGUE(); } /** * @brief EDMA channel 56 interrupt. 
* * @isr */ OSAL_IRQ_HANDLER(vector235) { OSAL_IRQ_PROLOGUE(); if (channels[56] == NULL) { SPC5_EDMA_ERROR_HANDLER(); } SPC5_EDMA.CIRQR.R = 56; channels[56]->dma_func(56, channels[56]->dma_param); OSAL_IRQ_EPILOGUE(); } /** * @brief EDMA channel 57 interrupt. * * @isr */ OSAL_IRQ_HANDLER(vector236) { OSAL_IRQ_PROLOGUE(); if (channels[57] == NULL) { SPC5_EDMA_ERROR_HANDLER(); } SPC5_EDMA.CIRQR.R = 57; channels[57]->dma_func(57, channels[57]->dma_param); OSAL_IRQ_EPILOGUE(); } /** * @brief EDMA channel 58 interrupt. * * @isr */ OSAL_IRQ_HANDLER(vector237) { OSAL_IRQ_PROLOGUE(); if (channels[58] == NULL) { SPC5_EDMA_ERROR_HANDLER(); } SPC5_EDMA.CIRQR.R = 58; channels[58]->dma_func(58, channels[58]->dma_param); OSAL_IRQ_EPILOGUE(); } /** * @brief EDMA channel 59 interrupt. * * @isr */ OSAL_IRQ_HANDLER(vector238) { OSAL_IRQ_PROLOGUE(); if (channels[59] == NULL) { SPC5_EDMA_ERROR_HANDLER(); } SPC5_EDMA.CIRQR.R = 59; channels[59]->dma_func(59, channels[59]->dma_param); OSAL_IRQ_EPILOGUE(); } /** * @brief EDMA channel 60 interrupt. * * @isr */ OSAL_IRQ_HANDLER(vector239) { OSAL_IRQ_PROLOGUE(); if (channels[60] == NULL) { SPC5_EDMA_ERROR_HANDLER(); } SPC5_EDMA.CIRQR.R = 60; channels[60]->dma_func(60, channels[60]->dma_param); OSAL_IRQ_EPILOGUE(); } /** * @brief EDMA channel 61 interrupt. * * @isr */ OSAL_IRQ_HANDLER(vector240) { OSAL_IRQ_PROLOGUE(); if (channels[61] == NULL) { SPC5_EDMA_ERROR_HANDLER(); } SPC5_EDMA.CIRQR.R = 61; channels[61]->dma_func(61, channels[61]->dma_param); OSAL_IRQ_EPILOGUE(); } /** * @brief EDMA channel 62 interrupt. * * @isr */ OSAL_IRQ_HANDLER(vector241) { OSAL_IRQ_PROLOGUE(); if (channels[62] == NULL) { SPC5_EDMA_ERROR_HANDLER(); } SPC5_EDMA.CIRQR.R = 62; channels[62]->dma_func(62, channels[62]->dma_param); OSAL_IRQ_EPILOGUE(); } /** * @brief EDMA channel 63 interrupt. 
 *
 * @isr
 */
OSAL_IRQ_HANDLER(vector242) {

  OSAL_IRQ_PROLOGUE();

  if (channels[63] == NULL) {
    SPC5_EDMA_ERROR_HANDLER();
  }
  /* Clears the interrupt request for channel 63 then invokes the channel
     callback.*/
  SPC5_EDMA.CIRQR.R = 63;
  channels[63]->dma_func(63, channels[63]->dma_param);

  OSAL_IRQ_EPILOGUE();
}
#endif /* SPC5_EDMA_NCHANNELS > 32 */
#endif /* SPC5_EDMA_NCHANNELS > 16 */

/*===========================================================================*/
/* Driver exported functions.                                                */
/*===========================================================================*/

/**
 * @brief   EDMA driver initialization.
 * @details Disables all DMA requests and error interrupts, clears any stale
 *          interrupt/error flags, programs per-channel priorities and sets
 *          the priority of the error interrupt source.
 *
 * @special
 */
void edmaInit(void) {
  unsigned i;

  SPC5_EDMA.CR.R    = SPC5_EDMA_CR_SETTING;
  /* All hardware requests and error interrupts disabled, all pending
     interrupt and error flags cleared (write-one-to-clear registers).*/
  SPC5_EDMA.ERQRL.R = 0x00000000;
  SPC5_EDMA.EEIRL.R = 0x00000000;
  SPC5_EDMA.IRQRL.R = 0xFFFFFFFF;
  SPC5_EDMA.ERL.R   = 0xFFFFFFFF;
#if SPC5_EDMA_NCHANNELS > 32
  SPC5_EDMA.ERQRH.R = 0x00000000;
  SPC5_EDMA.EEIRH.R = 0x00000000;
  SPC5_EDMA.IRQRH.R = 0xFFFFFFFF;
  SPC5_EDMA.ERH.R   = 0xFFFFFFFF;
#endif
  /* Initializing all the channels with a different priority within the
     channels group (priority tables g0..g3 defined elsewhere in this
     file).*/
  for (i = 0; i < 16; i++) {
    SPC5_EDMA.CPR[i].R      = g0[i];
#if SPC5_EDMA_NCHANNELS > 16
    SPC5_EDMA.CPR[i + 16].R = g1[i];
#endif
#if SPC5_EDMA_NCHANNELS > 32
    SPC5_EDMA.CPR[i + 32].R = g2[i];
    SPC5_EDMA.CPR[i + 48].R = g3[i];
#endif
  }

  /* Error interrupt source.*/
  INTC.PSR[10].R = SPC5_EDMA_ERROR_IRQ_PRIO;

#if defined(SPC5_EDMA_MUX_PCTL)
  /* DMA MUX PCTL setup, only if required.*/
  halSPCSetPeripheralClockMode(SPC5_EDMA_MUX_PCTL,
                               SPC5_EDMA_MUX_START_PCTL);
#endif
}

/**
 * @brief   EDMA channel allocation.
 *
 * @param[in] ccfg      channel configuration
 * @return              The channel number.
 * @retval EDMA_ERROR   if the channel cannot be allocated.
 *
 * @special
 */
edma_channel_t edmaChannelAllocate(const edma_channel_config_t *ccfg) {

  osalDbgCheck((ccfg != NULL) &&
               (ccfg->dma_irq_prio < 16));

  /* If the channel is already taken then an error is returned.*/
  if (channels[ccfg->dma_channel] != NULL)
    return EDMA_ERROR;                          /* Already taken.*/

#if SPC5_EDMA_HAS_MUX
  /* Programming the MUX: enable bit (0x80) plus the peripheral request
     source for this channel.*/
  SPC5_DMAMUX.CHCONFIG[ccfg->dma_channel].R = (uint8_t)(0x80 |
                                                        ccfg->dma_periph);
#endif /* SPC5_EDMA_HAS_MUX */

  /* Associating the configuration to the channel.*/
  channels[ccfg->dma_channel] = ccfg;

  /* If an error callback is defined then the error interrupt source is
     enabled for the channel.*/
  if (ccfg->dma_error_func != NULL)
    SPC5_EDMA.SEEIR.R = (uint32_t)ccfg->dma_channel;

  /* Setting up IRQ priority for the selected channel. NOTE(review): assumes
     channel IRQ vectors start at INTC source 11 — matches the vector14..42
     handlers above for channel 3 onward; confirm for channels 0..2 which are
     outside this chunk.*/
  INTC.PSR[11 + ccfg->dma_channel].R = ccfg->dma_irq_prio;

  return ccfg->dma_channel;
}

/**
 * @brief   EDMA channel release.
 *
 * @param[in] channel   the channel number
 *
 * @special
 */
void edmaChannelRelease(edma_channel_t channel) {

  osalDbgCheck((channel >= 0) &&
               (channel < SPC5_EDMA_NCHANNELS));
  osalDbgAssert(channels[channel] != NULL, "not allocated");

  /* Enforcing a stop.*/
  edmaChannelStop(channel);

#if SPC5_EDMA_HAS_MUX
  /* Disabling the MUX slot.*/
  SPC5_DMAMUX.CHCONFIG[channel].R = 0;
#endif

  /* Clearing ISR sources for the channel (interrupt request, error
     interrupt enable, error flag).*/
  SPC5_EDMA.CIRQR.R = channel;
  SPC5_EDMA.CEEIR.R = channel;
  SPC5_EDMA.CER.R   = channel;

  /* The channel is flagged as available.*/
  channels[channel] = NULL;
}
#endif /* SPC5_HAS_EDMA */

/** @} */
apache-2.0
tylerroyal/api-checker
core/src/test/scala/com/rackspace/com/papi/components/checker/wadl/BaseStepSpec.scala
5219
/***
 *   Copyright 2014 Rackspace US, Inc.
 *
 *   Licensed under the Apache License, Version 2.0 (the "License");
 *   you may not use this file except in compliance with the License.
 *   You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 *   Unless required by applicable law or agreed to in writing, software
 *   distributed under the License is distributed on an "AS IS" BASIS,
 *   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *   See the License for the specific language governing permissions and
 *   limitations under the License.
 */
package com.rackspace.com.papi.components.checker.wadl

import javax.xml.namespace.QName

import com.rackspace.cloud.api.wadl.test.BaseWADLSpec
import com.rackspace.com.papi.components.checker.step._
import com.rackspace.com.papi.components.checker.step.base.{ConnectedStep, Step}
import com.rackspace.com.papi.components.checker.step.startend._
import org.scalatest.exceptions.TestFailedException

// NOTE(review): scala.collection.mutable.LinkedList is deprecated in newer
// Scala versions; used here only inside assert() below.
import scala.collection.mutable.LinkedList

/**
 * Base spec providing step-machine assertion helpers.
 *
 * Each with* method filters an Array[Step] down to the steps of a given
 * type (optionally also matching an attribute such as a URI regex, method
 * name or label). The parameterless vals/defs below them are the curried
 * forms used as path predicates by assert().
 */
class BaseStepSpec extends BaseWADLSpec {
  // Builder that converts a WADL into a checker step machine.
  var builder = new StepBuilder(wadl)

  // Type-only filters: keep steps of the named class.
  def withStart(a : Array[Step]) : Array[Step] = a.filter (f => f.isInstanceOf[Start])
  def withAccept(a : Array[Step]) : Array[Step] = a.filter (f => f.isInstanceOf[Accept])
  def withURLFail(a : Array[Step]) : Array[Step] = a.filter (f => f.isInstanceOf[URLFail])
  def withMethodFail(a : Array[Step]) : Array[Step] = a.filter (f => f.isInstanceOf[MethodFail])
  def withContentFail (a : Array[Step]) : Array[Step] = a.filter(f => f.isInstanceOf[ContentFail])
  def withWellXML(a : Array[Step]) : Array[Step] = a.filter (f => f.isInstanceOf[WellFormedXML])
  def withWellJSON(a : Array[Step]) : Array[Step] = a.filter (f => f.isInstanceOf[WellFormedJSON])
  def withXSD(a : Array[Step]) : Array[Step] = a.filter (f => f.isInstanceOf[XSD])

  // Type + attribute filters: keep steps of the named class whose relevant
  // attribute (uri / method / expression / types / simpleType / label)
  // matches the given value.
  def withURLFailMatch(a : Array[Step], mat : String) : Array[Step] = a.filter (f => f.isInstanceOf[URLFailMatch]).filter(f => f.asInstanceOf[URLFailMatch].uri.toString == mat)
  def withMethodFailMatch(a : Array[Step], mat : String) : Array[Step] = a.filter (f => f.isInstanceOf[MethodFailMatch]).filter(f => f.asInstanceOf[MethodFailMatch].method.toString == mat)
  def withURI(a : Array[Step], uri : String) : Array[Step] = a.filter (f => f.isInstanceOf[URI]).filter(f => f.asInstanceOf[URI].uri.toString == uri)
  def withReqType(a : Array[Step], reqType : String) : Array[Step] = a.filter (f => f.isInstanceOf[ReqType]).filter(f => f.asInstanceOf[ReqType].rtype.toString == reqType)
  def withXPath(a : Array[Step], exp : String) : Array[Step] = a.filter (f => f.isInstanceOf[XPath]).filter(f => f.asInstanceOf[XPath].expression == exp)
  def withReqTypeFail(a : Array[Step], types : String) : Array[Step] = a.filter (f => f.isInstanceOf[ReqTypeFail]).filter(f => f.asInstanceOf[ReqTypeFail].types.toString == types)
  def withURIXSD(a : Array[Step], qname : QName) : Array[Step] = a.filter (f => f.isInstanceOf[URIXSD]).filter(f => f.asInstanceOf[URIXSD].xsd.simpleType == qname)
  def withMethod(a : Array[Step], method : String) : Array[Step] = a.filter (f => f.isInstanceOf[Method]).filter(f => f.asInstanceOf[Method].method.toString == method)
  def withLabel(a : Array[Step], label : String) : Array[Step] = a.filter (f => f.label == label)

  // Predicate aliases used to spell out expected paths in assert(...).
  def Start : (Array[Step]) => Array[Step] = withStart
  def Accept : (Array[Step]) => Array[Step] = withAccept
  def URLFail : (Array[Step]) => Array[Step] = withURLFail
  def MethodFail : (Array[Step]) => Array[Step] = withMethodFail
  def ContentFail : (Array[Step]) => Array[Step] = withContentFail
  def WellFormedXML : (Array[Step]) => Array[Step] = withWellXML
  def WellFormedJSON : (Array[Step]) => Array[Step] = withWellJSON
  def XSD : (Array[Step]) => Array[Step] = withXSD
  def URLFailMatch(m : String) : (Array[Step]) => Array[Step] = withURLFailMatch(_, m)
  def MethodFailMatch(m : String) : (Array[Step]) => Array[Step] = withMethodFailMatch(_, m)
  def URI(m : String) : (Array[Step]) => Array[Step] = withURI(_, m)
  def URIXSD(m : QName) : (Array[Step]) => Array[Step] = withURIXSD(_, m)
  def Method(m : String) : (Array[Step]) => Array[Step] = withMethod(_, m)
  def Label(m : String) : (Array[Step]) => Array[Step] = withLabel(_, m)
  def ReqType(m : String) : (Array[Step]) => Array[Step] = withReqType(_, m)
  def ReqTypeFail(m : String) : (Array[Step]) => Array[Step] = withReqTypeFail(_, m)
  def XPath(m : String) : (Array[Step]) => Array[Step] = withXPath(_, m)

  /**
   * Asserts that a path described by the given step predicates exists in the
   * step machine rooted at s.
   *
   * Starting from s, each predicate in turn is applied to the current
   * frontier of steps; if it matches nothing the path is broken and a
   * TestFailedException is thrown (stack depth 4 points the failure at the
   * caller). Otherwise the frontier is replaced by the union of the next
   * steps of every matched ConnectedStep and the walk continues.
   */
  def assert(s : Step, step_funs : ((Array[Step]) => Array[Step])*) : Unit = {
    if (step_funs.length == 0) throw new TestFailedException("Path assertion should contain at least one step!",4)

    var next : Array[Step] = Array(s)
    for (a <- 0 to step_funs.length - 1) {
      // Narrow the current frontier with the a-th path predicate.
      val result : Array[Step] = step_funs(a)(next)
      if (result.length == 0) {
        throw new TestFailedException("Could not complete path",4)
      }
      // Collect the successors of every matched connected step; terminal
      // (non-connected) steps contribute nothing to the next frontier.
      var list : LinkedList[Step] = new LinkedList[Step]
      result.filter(f => f.isInstanceOf[ConnectedStep]).foreach(r => list ++= r.asInstanceOf[ConnectedStep].next)
      next = list.toArray
    }
  }
}
apache-2.0
guifre/crawljax
src/test/java/com/crawljax/oracle/OracleTest.java
10464
package com.crawljax.oracle; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; import com.crawljax.oraclecomparator.Comparator; import com.crawljax.oraclecomparator.comparators.AttributeComparator; import com.crawljax.oraclecomparator.comparators.DateComparator; import com.crawljax.oraclecomparator.comparators.EditDistanceComparator; import com.crawljax.oraclecomparator.comparators.PlainStructureComparator; import com.crawljax.oraclecomparator.comparators.RegexComparator; import com.crawljax.oraclecomparator.comparators.ScriptComparator; import com.crawljax.oraclecomparator.comparators.SimpleComparator; import com.crawljax.oraclecomparator.comparators.StyleComparator; import com.crawljax.oraclecomparator.comparators.XPathExpressionComparator; import org.junit.Test; /** * @author danny * @version $Id: OracleTest.java 441 2010-09-13 19:28:10Z [email protected] $ */ public class OracleTest { private void compareTwoDomsWithComparatorEqual( String original, String newDom, Comparator comparator) { comparator.setOriginalDom(original); comparator.setNewDom(newDom); assertTrue(comparator.isEquivalent()); } private void compareTwoDomsWithComparatorNotEqual( String original, String newDom, Comparator comparator) { comparator.setOriginalDom(original); comparator.setNewDom(newDom); assertFalse(comparator.isEquivalent()); } @Test public void testDateOracle() { Comparator oracle = new DateComparator(); /* dates with days */ compareTwoDomsWithComparatorEqual("<HTML>Monday 15 march 1998</HTML>", "<HTML>Tuesday 13 december 2005</HTML>", oracle); compareTwoDomsWithComparatorEqual( "<HTML>Monday 1 feb '98</HTML>", "<HTML>Wednesday 15 march '00</HTML>", oracle); compareTwoDomsWithComparatorEqual( "<HTML>Friday 10 february</HTML>", "<HTML>Wednesday 3 march</HTML>", oracle); /* dates only numeric */ compareTwoDomsWithComparatorEqual( "<HTML>28-12-1983</HTML>", "<HTML>15-3-1986</HTML>", oracle); compareTwoDomsWithComparatorEqual( 
"<HTML>28.1.1976</HTML>", "<HTML>3.15.1986</HTML>", oracle); compareTwoDomsWithComparatorEqual( "<HTML>1/1/2001</HTML>", "<HTML>30/12/1988</HTML>", oracle); compareTwoDomsWithComparatorEqual( "<HTML>28-12-1983</HTML>", "<HTML>19-2-1986</HTML>", oracle); compareTwoDomsWithComparatorEqual( "<HTML>28.1.1976</HTML>", "<HTML>3.15.1986</HTML>", oracle); compareTwoDomsWithComparatorEqual( "<HTML>1/1/2001</HTML>", "<HTML>30/12/1988</HTML>", oracle); compareTwoDomsWithComparatorEqual( "<HTML>28-12-'83</HTML>", "<HTML>19-1-'86</HTML>", oracle); compareTwoDomsWithComparatorEqual( "<HTML>28.1.'76</HTML>", "<HTML>3.15.'86</HTML>", oracle); compareTwoDomsWithComparatorEqual( "<HTML>1/1/'01</HTML>", "<HTML>30/12/'88</HTML>", oracle); compareTwoDomsWithComparatorEqual( "<HTML>2003-16-03</HTML>", "<HTML>1986-3-3</HTML>", oracle); compareTwoDomsWithComparatorEqual( "<HTML>1993.12.12</HTML>", "<HTML>1997.13.09</HTML>", oracle); compareTwoDomsWithComparatorEqual( "<HTML>2013/1/3</HTML>", "<HTML>1986/3/3</HTML>", oracle); /* dates with long months */ compareTwoDomsWithComparatorEqual( "<HTML>19 november 1986</HTML>", "<HTML>18 june 1973</HTML>", oracle); compareTwoDomsWithComparatorEqual( "<HTML>1th march 1986</HTML>", "<HTML>28th december 2005</HTML>", oracle); compareTwoDomsWithComparatorEqual( "<HTML>15th november</HTML>", "<HTML>3th july</HTML>", oracle); compareTwoDomsWithComparatorEqual( "<HTML>2003 March 15</HTML>", "<HTML>1978 july 5</HTML>", oracle); compareTwoDomsWithComparatorEqual( "<HTML>2003Apr15</HTML>", "<HTML>1978jul5</HTML>", oracle); compareTwoDomsWithComparatorEqual( "<HTML>March 2003</HTML>", "<HTML>October 1996</HTML>", oracle); compareTwoDomsWithComparatorEqual( "<HTML>April '02</HTML>", "<HTML>August '99</HTML>", oracle); compareTwoDomsWithComparatorEqual( "<HTML>April 19 2007</HTML>", "<HTML>January 1 1994</HTML>", oracle); compareTwoDomsWithComparatorEqual( "<HTML>April 19, 2007</HTML>", "<HTML>January 1, 1994</HTML>", oracle); 
compareTwoDomsWithComparatorEqual( "<HTML>April 4 '07</HTML>", "<HTML>January 1 '87</HTML>", oracle); compareTwoDomsWithComparatorEqual( "<HTML>April 19, '66</HTML>", "<HTML>January 1, '88</HTML>", oracle); /* time */ compareTwoDomsWithComparatorEqual( "<HTML>4:47:00 am</HTML>", "<HTML>3:59:2PM</HTML>", oracle); compareTwoDomsWithComparatorEqual("<HTML>2:13pm</HTML>", "<HTML>3:59am</HTML>", oracle); compareTwoDomsWithComparatorEqual( "<HTML>14:17:29</HTML>", "<HTML>7:34:26</HTML>", oracle); } @Test public void testStyleOracle() { Comparator oracle = new StyleComparator(); /* IGNORE_TAGS */ compareTwoDomsWithComparatorEqual("<HTML><B>foo</B></HTML>", "<HTML>foo</HTML>", oracle); compareTwoDomsWithComparatorEqual( "<HTML><PRE>foo</PRE></HTML>", "<HTML><STRONG>foo</STRONG></HTML>", oracle); compareTwoDomsWithComparatorEqual("<HTML><FONT color=\"red\">foo</FONT> bar</HTML>", "<HTML>foo bar</HTML>", oracle); compareTwoDomsWithComparatorEqual("<HTML><FONT color=\"red\">foo</FONT> bar</HTML>", "<HTML><FONT color=\"green\">foo</FONT> bar</HTML>", oracle); /* IGNORE_ATTRIBUTES */ compareTwoDomsWithComparatorEqual("<HTML><SPAN width=\"100px\">foo</SPAN></HTML>", "<HTML><SPAN>foo</SPAN></HTML>", oracle); compareTwoDomsWithComparatorEqual("<HTML><SPAN>foo</SPAN></HTML>", "<HTML><SPAN valign=\"top\">foo</SPAN></HTML>", oracle); /* STYLE ATTRIBUTES */ compareTwoDomsWithComparatorEqual( "<HTML><SPAN style=\"color: green;\">foo</SPAN></HTML>", "<HTML><SPAN style=\"color:red;\">foo</SPAN></HTML>", oracle); compareTwoDomsWithComparatorEqual("<HTML><SPAN style=\"color: yellow\">foo</SPAN></HTML>", "<HTML><SPAN>foo</SPAN></HTML>", oracle); compareTwoDomsWithComparatorEqual( "<HTML><SPAN style=\"display:inline;color:red;\">foo</SPAN></HTML>", "<HTML><SPAN style=\"display:inline; color:green;\">foo</SPAN></HTML>", oracle); compareTwoDomsWithComparatorNotEqual( "<HTML><SPAN style=\"display:inline;color:red;\">foo</SPAN></HTML>", "<HTML><SPAN style=\"display:none; 
color:green;\">foo</SPAN></HTML>", oracle); } @Test public void testSimpleOracle() { Comparator oracle = new SimpleComparator(); compareTwoDomsWithComparatorEqual("<HTML>\n\n<SPAN>\n foo\n</SPAN></HTML>", "<HTML>\n<SPAN>\n foo \n\n</SPAN>\n</HTML>", oracle); } @Test public void testRegexOracle() { Comparator oracle = new RegexComparator("[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}\\.[0-9]{1,3}"); compareTwoDomsWithComparatorEqual( "<HTML>192.168.1.1</HTML>", "<HTML>10.0.0.138</HTML>", oracle); } @Test public void testAttributeOracle() { String control = "<HTML><A href=\"foo.html\" myattr=\"true\">foo</A><HTML>"; String test = "<HTML><A href=\"foo.html\" myattr=\"false\">foo</A><HTML>"; compareTwoDomsWithComparatorEqual(control, test, new AttributeComparator("myattr")); } @Test public void testPlainStructureOracle() { String control = "<HTML><A href=\"foo.html\" jquery12421421=\"bla\" myattr=\"true\">foo</A><HTML>"; String test = "<HTML><A></A><HTML>"; compareTwoDomsWithComparatorEqual( control, test, new PlainStructureComparator(control, test)); } @Test public void testScriptComparator() { String control = "<HTML><head><script>JavaScript();</script><title>Test</title></head><body><script>JavaScript23();</script>test</body><HTML>"; String test = "<HTML><head><title>Test</title></head><body>test</body><HTML>"; compareTwoDomsWithComparatorEqual(control, test, new ScriptComparator(control, test)); } @Test public void testEditDistanceComparator() { String control = "<HTML><head><title>Test</title></head><body>test</body><HTML>"; String test = "<HTML><head><title>Test</title></head><body>test</body><HTML>"; assertTrue(control.equals(test)); compareTwoDomsWithComparatorEqual(control, test, new EditDistanceComparator(0)); compareTwoDomsWithComparatorEqual(control, test, new EditDistanceComparator(1)); test = "TheIsAlotOfRubish"; compareTwoDomsWithComparatorNotEqual(control, test, new EditDistanceComparator(1)); compareTwoDomsWithComparatorEqual(control, test, new 
EditDistanceComparator(0)); // We miss the title test = "<HTML><head></head><body>test</body><HTML>"; Comparator oracle = new EditDistanceComparator(0.5); compareTwoDomsWithComparatorEqual(control, test, oracle); compareTwoDomsWithComparatorNotEqual(control, test, new EditDistanceComparator(1)); compareTwoDomsWithComparatorEqual(control, test, new EditDistanceComparator(0)); } @Test public void testXPathExpressionComparator() { String control = "<HTML><head><title>Test</title></head><body>test</body><HTML>"; String test = "<HTML><head><title>Test</title></head><body>test</body><HTML>"; assertTrue(control.equals(test)); XPathExpressionComparator oracle = new XPathExpressionComparator(); compareTwoDomsWithComparatorEqual(control, test, oracle); compareTwoDomsWithComparatorEqual( control, test, new XPathExpressionComparator(control, test)); test = "<HTML><head><title>Test</title></head><body>test<div id='ignoreme'>" + "ignoreme</div></body><HTML>"; compareTwoDomsWithComparatorNotEqual(control, test, oracle); compareTwoDomsWithComparatorNotEqual( control, test, new XPathExpressionComparator(control, test)); oracle.addExpression("//*[@id='ignoreme']"); compareTwoDomsWithComparatorEqual(control, test, oracle); compareTwoDomsWithComparatorEqual(test, control, oracle); control = "<HTML><head><title>Test</title></head><body>test<div id='ignoreme'>" + "ignoreme123</div></body><HTML>"; compareTwoDomsWithComparatorEqual(control, test, oracle); compareTwoDomsWithComparatorEqual(test, control, oracle); } }
apache-2.0
Kurento/kms-elements
src/server/implementation/objects/RecorderEndpointImpl.cpp
10886
/*
 * (C) Copyright 2016 Kurento (http://kurento.org/)
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

#include <gst/gst.h>
#include "MediaType.hpp"
#include "MediaPipeline.hpp"
#include "MediaProfileSpecType.hpp"
#include "GapsFixMethod.hpp"
#include <RecorderEndpointImplFactory.hpp>
#include "RecorderEndpointImpl.hpp"
#include <jsonrpc/JsonSerializer.hpp>
#include <KurentoException.hpp>
#include <gst/gst.h>
#include <commons/kmsrecordingprofile.h>
#include "StatsType.hpp"
#include "EndpointStats.hpp"
#include <commons/kmsutils.h>
#include <commons/kmsstats.h>
#include <SignalHandler.hpp>
#include <functional>

#define GST_CAT_DEFAULT kurento_recorder_endpoint_impl
GST_DEBUG_CATEGORY_STATIC (GST_CAT_DEFAULT);
#define GST_DEFAULT_NAME "KurentoRecorderEndpointImpl"

#define FACTORY_NAME "recorderendpoint"

/* Config key (kurento.conf) and matching GObject property for the gap-fix mode. */
#define PARAM_GAPS_FIX "gapsFix"
#define PROP_GAPS_FIX "gaps-fix"

#define TIMEOUT 4 /* seconds */

namespace kurento
{

/* Mirrors the state values reported by the "state-changed" signal of the
 * underlying kmsrecorder GStreamer element. */
typedef enum {
  KMS_URI_END_POINT_STATE_STOP,
  KMS_URI_END_POINT_STATE_START,
  KMS_URI_END_POINT_STATE_PAUSE
} KmsUriEndPointState;

/* Whether the installed kmsrecorder plugin supports the KSR profile;
 * initialized once by StaticConstructor below. */
bool RecorderEndpointImpl::support_ksr;

/* Returns true when the "kmsrecorder" GStreamer plugin can be loaded.
 * Used as a proxy for Kurento Split Recorder (KSR) support. */
static bool
check_support_for_ksr ()
{
  GstPlugin *plugin = nullptr;
  bool supported;

  plugin = gst_plugin_load_by_name ("kmsrecorder");
  supported = plugin != nullptr;

  /* gst_plugin_load_by_name returns a new reference; release it. */
  g_clear_object (&plugin);

  return supported;
}

/* Constructor: configures the underlying recorderendpoint element with the
 * requested recording profile, the EOS behavior, and (optionally) the
 * configured gaps-fix mode.
 *
 * Throws KurentoException (MEDIA_OBJECT_ILLEGAL_PARAM_ERROR) when the KSR
 * profile is requested but the plugin does not support it. */
RecorderEndpointImpl::RecorderEndpointImpl (const boost::property_tree::ptree
    &conf,
    std::shared_ptr<MediaPipeline> mediaPipeline, const std::string &uri,
    std::shared_ptr<MediaProfileSpecType> mediaProfile,
    bool stopOnEndOfStream) : UriEndpointImpl (conf,
          std::dynamic_pointer_cast<MediaObjectImpl> (mediaPipeline), FACTORY_NAME,
          uri)
{
  /* When accept-eos is TRUE the element stops recording on end-of-stream. */
  g_object_set (G_OBJECT (getGstreamerElement() ), "accept-eos",
                stopOnEndOfStream, NULL);

  /* Map the API-level MediaProfileSpecType onto the element's "profile"
   * GObject property. */
  switch (mediaProfile->getValue() ) {
  case MediaProfileSpecType::WEBM:
    g_object_set ( G_OBJECT (element), "profile", KMS_RECORDING_PROFILE_WEBM,
                   NULL);
    GST_INFO ("Set WEBM profile");
    break;

  case MediaProfileSpecType::MP4:
    g_object_set ( G_OBJECT (element), "profile", KMS_RECORDING_PROFILE_MP4,
                   NULL);
    GST_INFO ("Set MP4 profile");
    break;

  case MediaProfileSpecType::MKV:
    g_object_set ( G_OBJECT (element), "profile", KMS_RECORDING_PROFILE_MKV,
                   NULL);
    GST_INFO ("Set MKV profile");
    break;

  case MediaProfileSpecType::WEBM_VIDEO_ONLY:
    g_object_set ( G_OBJECT (element), "profile",
                   KMS_RECORDING_PROFILE_WEBM_VIDEO_ONLY, NULL);
    GST_INFO ("Set WEBM VIDEO ONLY profile");
    break;

  case MediaProfileSpecType::WEBM_AUDIO_ONLY:
    g_object_set ( G_OBJECT (element), "profile",
                   KMS_RECORDING_PROFILE_WEBM_AUDIO_ONLY, NULL);
    GST_INFO ("Set WEBM AUDIO ONLY profile");
    break;

  case MediaProfileSpecType::MKV_VIDEO_ONLY:
    g_object_set ( G_OBJECT (element), "profile",
                   KMS_RECORDING_PROFILE_MKV_VIDEO_ONLY, NULL);
    GST_INFO ("Set MKV VIDEO ONLY profile");
    break;

  case MediaProfileSpecType::MKV_AUDIO_ONLY:
    g_object_set ( G_OBJECT (element), "profile",
                   KMS_RECORDING_PROFILE_MKV_AUDIO_ONLY, NULL);
    GST_INFO ("Set MKV AUDIO ONLY profile");
    break;

  case MediaProfileSpecType::MP4_VIDEO_ONLY:
    g_object_set ( G_OBJECT (element), "profile",
                   KMS_RECORDING_PROFILE_MP4_VIDEO_ONLY, NULL);
    GST_INFO ("Set MP4 VIDEO ONLY profile");
    break;

  case MediaProfileSpecType::MP4_AUDIO_ONLY:
    g_object_set ( G_OBJECT (element), "profile",
                   KMS_RECORDING_PROFILE_MP4_AUDIO_ONLY, NULL);
    GST_INFO ("Set MP4 AUDIO ONLY profile");
    break;

  case MediaProfileSpecType::JPEG_VIDEO_ONLY:
    g_object_set ( G_OBJECT (element), "profile",
                   KMS_RECORDING_PROFILE_JPEG_VIDEO_ONLY, NULL);
    GST_INFO ("Set JPEG profile");
    break;

  case MediaProfileSpecType::KURENTO_SPLIT_RECORDER:
    /* KSR is only available when the plugin advertises support for it. */
    if (!RecorderEndpointImpl::support_ksr) {
      throw KurentoException (MEDIA_OBJECT_ILLEGAL_PARAM_ERROR,
                              "Kurento Split Recorder not supported");
    }

    g_object_set ( G_OBJECT (element), "profile", KMS_RECORDING_PROFILE_KSR,
                   NULL);
    GST_INFO ("Set KSR profile");
    break;
  }

  /* Optional: apply the "gapsFix" setting from configuration, if present. */
  GapsFixMethod gapsFix;
  if (getConfigValue<GapsFixMethod, RecorderEndpoint> (
        &gapsFix, PARAM_GAPS_FIX)) {
    GST_INFO ("Set RecorderEndpoint gaps fix mode: %s",
        gapsFix.getString ().c_str ());
    g_object_set (getGstreamerElement (), PROP_GAPS_FIX, gapsFix.getValue (),
        NULL);
  }
}

/* Connects the "state-changed" signal after construction.
 * Done here (not in the constructor) because shared_from_this() is needed
 * to keep the object alive while the handler is registered. */
void RecorderEndpointImpl::postConstructor()
{
  UriEndpointImpl::postConstructor();

  handlerOnStateChanged = register_signal_handler (G_OBJECT (element),
                          "state-changed",
                          std::function <void (GstElement *, gint) >
                          (std::bind (&RecorderEndpointImpl::onStateChanged, this,
                                      std::placeholders::_2) ),
                          std::dynamic_pointer_cast<RecorderEndpointImpl>
                          (shared_from_this() ) );
}

/* Signal callback: translates the element's new state into the matching API
 * event (Stopped/Recording/Paused), then records the state and wakes any
 * thread blocked in waitForStateChange(). */
void RecorderEndpointImpl::onStateChanged (gint newState)
{
  switch (newState) {
  case KMS_URI_END_POINT_STATE_STOP: {
    GST_DEBUG_OBJECT (element, "State changed to Stopped");

    try {
      Stopped event (shared_from_this (), Stopped::getName ());
      sigcSignalEmit(signalStopped, event);
    } catch (const std::bad_weak_ptr &e) {
      // shared_from_this() can fail if the object is being destroyed
      GST_ERROR ("BUG creating %s: %s", Stopped::getName ().c_str (),
          e.what ());
    }

    break;
  }

  case KMS_URI_END_POINT_STATE_START: {
    GST_DEBUG_OBJECT (element, "State changed to Recording");

    try {
      Recording event (shared_from_this(), Recording::getName () );
      sigcSignalEmit(signalRecording, event);
    } catch (const std::bad_weak_ptr &e) {
      // shared_from_this() can fail if the object is being destroyed
      GST_ERROR ("BUG creating %s: %s", Recording::getName ().c_str (),
          e.what ());
    }

    break;
  }

  case KMS_URI_END_POINT_STATE_PAUSE: {
    GST_DEBUG_OBJECT (element, "State changed to Paused");

    try {
      Paused event (shared_from_this(), Paused::getName () );
      sigcSignalEmit(signalPaused, event);
    } catch (const std::bad_weak_ptr &e) {
      // shared_from_this() can fail if the object is being destroyed
      GST_ERROR ("BUG creating %s: %s", Paused::getName ().c_str (),
          e.what ());
    }

    break;
  }
  }

  /* Publish the new state under the lock and notify waiters. */
  std::unique_lock<std::mutex> lck (mtx);
  GST_TRACE_OBJECT (element, "State changed to %d", newState);
  state = newState;
  cv.notify_one();
}

/* Blocks until onStateChanged() records expectedState, or TIMEOUT seconds
 * elapse; a timeout is logged but not raised to the caller. */
void RecorderEndpointImpl::waitForStateChange (gint expectedState)
{
  std::unique_lock<std::mutex> lck (mtx);

  if (!cv.wait_for (lck, std::chrono::seconds (TIMEOUT),
  [&] {return expectedState == state;}) ) {
    GST_ERROR_OBJECT (element, "STATE did not changed to %d in %d seconds",
        expectedState, TIMEOUT);
  }
}

/* Releases the endpoint; if the element is not already stopped, stop it and
 * wait for the transition before delegating to the base class. */
void RecorderEndpointImpl::release ()
{
  gint state = -1;

  g_object_get (getGstreamerElement(), "state", &state, NULL);

  if (state == 0 /* stop */) {
    goto end;
  }

  stopAndWait();

end:
  UriEndpointImpl::release();
}

/* Destructor: unregisters the state-changed handler and sanity-checks that
 * the element is already stopped (release() should have done this). */
RecorderEndpointImpl::~RecorderEndpointImpl()
{
  gint state = -1;

  if (handlerOnStateChanged > 0) {
    unregister_signal_handler (element, handlerOnStateChanged);
  }

  g_object_get (getGstreamerElement(), "state", &state, NULL);

  if (state != 0 /* stop */) {
    GST_ERROR ("Recorder should be stopped when reaching this point");
  }
}

/* Public API: start recording (delegates to UriEndpointImpl::start). */
void RecorderEndpointImpl::record ()
{
  start();
}

/* Stops the recorder and synchronously waits for the STOP state. */
void RecorderEndpointImpl::stopAndWait ()
{
  stop();
  waitForStateChange (KMS_URI_END_POINT_STATE_STOP);
}

/* Copies the per-pad E2E latency averages into the deprecated scalar
 * audio/video latency properties, for backward compatibility. */
static void
setDeprecatedProperties (std::shared_ptr<EndpointStats> eStats)
{
  std::vector<std::shared_ptr<MediaLatencyStat>> inStats =
        eStats->getE2ELatency();

  for (auto &inStat : inStats) {
    if (inStat->getName() == "sink_audio_default") {
      eStats->setAudioE2ELatency(inStat->getAvg());
    } else if (inStat->getName() == "sink_video_default") {
      eStats->setVideoE2ELatency(inStat->getAvg());
    }
  }
}

/* Builds an EndpointStats entry (E2E latencies only; input latencies and the
 * scalar fields are left at 0.0) and stores it in statsReport under id. */
void RecorderEndpointImpl::collectEndpointStats (std::map
    <std::string, std::shared_ptr<Stats>>
    &statsReport, std::string id, const GstStructure *stats,
    double timestamp, int64_t timestampMillis)
{
  std::shared_ptr<Stats> endpointStats;
  GstStructure *e2e_stats;
  std::vector<std::shared_ptr<MediaLatencyStat>> inputStats;
  std::vector<std::shared_ptr<MediaLatencyStat>> e2eStats;

  if (gst_structure_get (stats, "e2e-latencies", GST_TYPE_STRUCTURE,
                         &e2e_stats, NULL) ) {
    collectLatencyStats (e2eStats, e2e_stats);
    /* gst_structure_get returned an owned copy; free it. */
    gst_structure_free (e2e_stats);
  }

  endpointStats = std::make_shared <EndpointStats> (id,
                  std::make_shared <StatsType> (StatsType::endpoint), timestamp,
                  timestampMillis, 0.0, 0.0, inputStats, 0.0, 0.0, e2eStats);

  setDeprecatedProperties (std::dynamic_pointer_cast <EndpointStats>
                           (endpointStats) );

  statsReport[id] = endpointStats;
}

/* Adds this endpoint's stats (if present in the element stats structure) to
 * the report, then lets the base class fill in the rest. */
void RecorderEndpointImpl::fillStatsReport (std::map
    <std::string, std::shared_ptr<Stats>>
    &report, const GstStructure *stats, double timestamp,
    int64_t timestampMillis)
{
  const GstStructure *e_stats;

  e_stats = kms_utils_get_structure_by_name (stats, KMS_MEDIA_ELEMENT_FIELD);

  if (e_stats != nullptr) {
    collectEndpointStats (report, getId (), e_stats, timestamp,
                          timestampMillis);
  }

  UriEndpointImpl::fillStatsReport (report, stats, timestamp, timestampMillis);
}

/* Factory entry point used by the server to build RecorderEndpoint objects. */
MediaObjectImpl *
RecorderEndpointImplFactory::createObject (const boost::property_tree::ptree
    &conf, std::shared_ptr<MediaPipeline>
    mediaPipeline, const std::string &uri,
    std::shared_ptr<MediaProfileSpecType> mediaProfile,
    bool stopOnEndOfStream) const
{
  return new RecorderEndpointImpl (conf, mediaPipeline, uri, mediaProfile,
                                   stopOnEndOfStream);
}

RecorderEndpointImpl::StaticConstructor RecorderEndpointImpl::staticConstructor;

/* Runs once at load time: probes KSR support and registers the debug
 * category. */
RecorderEndpointImpl::StaticConstructor::StaticConstructor()
{
  RecorderEndpointImpl::support_ksr = check_support_for_ksr();

  GST_DEBUG_CATEGORY_INIT (GST_CAT_DEFAULT, GST_DEFAULT_NAME, 0,
                           GST_DEFAULT_NAME);
}

} /* kurento */
apache-2.0
samvera/hyrax
app/models/concerns/hyrax/solr_document/metadata.rb
4358
# frozen_string_literal: true
module Hyrax
  module SolrDocument
    # Declares typed reader methods for Solr document fields.
    #
    # Each +attribute+ call defines an instance method that reads the given
    # Solr field from the document hash and coerces it with the given
    # +Solr::*+ coercion class.
    module Metadata
      extend ActiveSupport::Concern

      class_methods do
        # Defines a reader method +name+ that returns +type.coerce(self[field])+.
        #
        # @param name [Symbol] name of the generated reader method
        # @param type [Class] coercion class responding to +.coerce+
        # @param field [String] Solr field key to read from the document
        def attribute(name, type, field)
          define_method name do
            type.coerce(self[field])
          end
        end
      end

      # Coercion strategies for raw Solr field values.
      module Solr
        class Array
          # @return [Array] the value wrapped in an array (nil becomes [])
          def self.coerce(input)
            ::Array.wrap(input)
          end
        end

        class String
          # @return [String, nil] the first element of the (wrapped) value
          def self.coerce(input)
            ::Array.wrap(input).first
          end
        end

        class Date
          # @return [Date, nil] the parsed date, or nil when the field is
          #   blank or cannot be parsed
          def self.coerce(input)
            field = String.coerce(input)
            return if field.blank?

            begin
              ::Date.parse(field)
            rescue ArgumentError
              # Log the whole unparseable value (`field` is already a String;
              # `field.first` would only log its first character) and return
              # nil explicitly rather than the logger's return value.
              Hyrax.logger.info "Unable to parse date: #{field.inspect}"
              nil
            end
          end
        end
      end

      included do
        attribute :alternative_title, Solr::Array, "alternative_title_tesim"
        attribute :identifier, Solr::Array, "identifier_tesim"
        attribute :based_near, Solr::Array, "based_near_tesim"
        attribute :based_near_label, Solr::Array, "based_near_label_tesim"
        attribute :related_url, Solr::Array, "related_url_tesim"
        attribute :resource_type, Solr::Array, "resource_type_tesim"
        attribute :edit_groups, Solr::Array, ::Ability.edit_group_field
        attribute :edit_people, Solr::Array, ::Ability.edit_user_field
        attribute :read_groups, Solr::Array, ::Ability.read_group_field
        attribute :collection_ids, Solr::Array, 'collection_ids_tesim'
        attribute :admin_set, Solr::Array, "admin_set_tesim"
        attribute :admin_set_id, Solr::Array, "admin_set_id_ssim"
        attribute :member_ids, Solr::Array, "member_ids_ssim"
        attribute :member_of_collection_ids, Solr::Array, "member_of_collection_ids_ssim"
        attribute :member_of_collections, Solr::Array, "member_of_collections_ssim"
        attribute :description, Solr::Array, "description_tesim"
        attribute :abstract, Solr::Array, "abstract_tesim"
        attribute :title, Solr::Array, "title_tesim"
        attribute :contributor, Solr::Array, "contributor_tesim"
        attribute :subject, Solr::Array, "subject_tesim"
        attribute :publisher, Solr::Array, "publisher_tesim"
        attribute :language, Solr::Array, "language_tesim"
        attribute :keyword, Solr::Array, "keyword_tesim"
        attribute :license, Solr::Array, "license_tesim"
        attribute :source, Solr::Array, "source_tesim"
        attribute :date_created, Solr::Array, "date_created_tesim"
        attribute :rights_statement, Solr::Array, "rights_statement_tesim"
        attribute :rights_notes, Solr::Array, "rights_notes_tesim"
        attribute :access_right, Solr::Array, "access_right_tesim"
        attribute :mime_type, Solr::String, "mime_type_ssi"
        attribute :workflow_state, Solr::String, "workflow_state_name_ssim"
        attribute :human_readable_type, Solr::String, "human_readable_type_tesim"
        attribute :representative_id, Solr::String, "hasRelatedMediaFragment_ssim"
        # extract the term name from the rendering_predicate (it might be after the final / or #)
        attribute :rendering_ids, Solr::Array, Hyrax.config.rendering_predicate.value.split(/#|\/|,/).last + "_ssim"
        attribute :thumbnail_id, Solr::String, "hasRelatedImage_ssim"
        attribute :thumbnail_path, Solr::String, CatalogController.blacklight_config.index.thumbnail_field
        attribute :label, Solr::String, "label_tesim"
        attribute :file_format, Solr::String, "file_format_tesim"
        attribute :suppressed?, Solr::String, "suppressed_bsi"
        attribute :original_file_id, Solr::String, "original_file_id_ssi"
        attribute :date_modified, Solr::Date, "date_modified_dtsi"
        attribute :date_uploaded, Solr::Date, "date_uploaded_dtsi"
        attribute :create_date, Solr::Date, "system_create_dtsi"
        attribute :modified_date, Solr::Date, "system_modified_dtsi"
        attribute :embargo_release_date, Solr::Date, Hydra.config.permissions.embargo.release_date
        attribute :lease_expiration_date, Solr::Date, Hydra.config.permissions.lease.expiration_date
      end
    end
  end
end
apache-2.0
jhpx/Sunshine-Version-2
app/src/main/java/com/example/android/sunshine/app/data/WeatherProvider.java
14322
/* * Copyright (C) 2014 The Android Open Source Project * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.example.android.sunshine.app.data; import android.annotation.TargetApi; import android.content.ContentProvider; import android.content.ContentValues; import android.content.UriMatcher; import android.database.Cursor; import android.database.sqlite.SQLiteDatabase; import android.database.sqlite.SQLiteQueryBuilder; import android.net.Uri; public class WeatherProvider extends ContentProvider { // The URI Matcher used by this content provider. private static final UriMatcher sUriMatcher = buildUriMatcher(); private WeatherDbHelper mOpenHelper; static final int WEATHER = 100; static final int WEATHER_WITH_LOCATION = 101; static final int WEATHER_WITH_LOCATION_AND_DATE = 102; static final int LOCATION = 300; private static final SQLiteQueryBuilder sWeatherByLocationSettingQueryBuilder; static { sWeatherByLocationSettingQueryBuilder = new SQLiteQueryBuilder(); //This is an inner join which looks like //weather INNER JOIN location ON weather.location_id = location._id sWeatherByLocationSettingQueryBuilder.setTables( WeatherContract.WeatherEntry.TABLE_NAME + " INNER JOIN " + WeatherContract.LocationEntry.TABLE_NAME + " ON " + WeatherContract.WeatherEntry.TABLE_NAME + "." + WeatherContract.WeatherEntry.COLUMN_LOC_KEY + " = " + WeatherContract.LocationEntry.TABLE_NAME + "." + WeatherContract.LocationEntry._ID); } //location.location_setting = ? 
private static final String sLocationSettingSelection = WeatherContract.LocationEntry.TABLE_NAME + "." + WeatherContract.LocationEntry.COLUMN_LOCATION_SETTING + " = ? "; //location.location_setting = ? AND date >= ? private static final String sLocationSettingWithStartDateSelection = WeatherContract.LocationEntry.TABLE_NAME + "." + WeatherContract.LocationEntry.COLUMN_LOCATION_SETTING + " = ? AND " + WeatherContract.WeatherEntry.COLUMN_DATE + " >= ? "; //location.location_setting = ? AND date = ? private static final String sLocationSettingAndDaySelection = WeatherContract.LocationEntry.TABLE_NAME + "." + WeatherContract.LocationEntry.COLUMN_LOCATION_SETTING + " = ? AND " + WeatherContract.WeatherEntry.COLUMN_DATE + " = ? "; private Cursor getWeatherByLocationSetting(Uri uri, String[] projection, String sortOrder) { String locationSetting = WeatherContract.WeatherEntry.getLocationSettingFromUri(uri); long startDate = WeatherContract.WeatherEntry.getStartDateFromUri(uri); String[] selectionArgs; String selection; if (startDate == 0) { selection = sLocationSettingSelection; selectionArgs = new String[]{locationSetting}; } else { selectionArgs = new String[]{locationSetting, Long.toString(startDate)}; selection = sLocationSettingWithStartDateSelection; } return sWeatherByLocationSettingQueryBuilder.query(mOpenHelper.getReadableDatabase(), projection, selection, selectionArgs, null, null, sortOrder ); } private Cursor getWeatherByLocationSettingAndDate( Uri uri, String[] projection, String sortOrder) { String locationSetting = WeatherContract.WeatherEntry.getLocationSettingFromUri(uri); long date = WeatherContract.WeatherEntry.getDateFromUri(uri); return sWeatherByLocationSettingQueryBuilder.query(mOpenHelper.getReadableDatabase(), projection, sLocationSettingAndDaySelection, new String[]{locationSetting, Long.toString(date)}, null, null, sortOrder ); } /* Students: Here is where you need to create the UriMatcher. 
This UriMatcher will match each URI to the WEATHER, WEATHER_WITH_LOCATION, WEATHER_WITH_LOCATION_AND_DATE, and LOCATION integer constants defined above. You can test this by uncommenting the testUriMatcher test within TestUriMatcher. */ static UriMatcher buildUriMatcher() { // I know what you're thinking. Why create a UriMatcher when you can use regular // expressions instead? Because you're not crazy, that's why. // All paths added to the UriMatcher have a corresponding code to return when a match is // found. The code passed into the constructor represents the code to return for the root // URI. It's common to use NO_MATCH as the code for this case. final UriMatcher matcher = new UriMatcher(UriMatcher.NO_MATCH); final String authority = WeatherContract.CONTENT_AUTHORITY; // For each type of URI you want to add, create a corresponding code. matcher.addURI(authority, WeatherContract.PATH_WEATHER, WEATHER); matcher.addURI(authority, WeatherContract.PATH_WEATHER + "/*", WEATHER_WITH_LOCATION); matcher.addURI(authority, WeatherContract.PATH_WEATHER + "/*/#", WEATHER_WITH_LOCATION_AND_DATE); matcher.addURI(authority, WeatherContract.PATH_LOCATION, LOCATION); return matcher; } /* Students: We've coded this for you. We just create a new WeatherDbHelper for later use here. */ @Override public boolean onCreate() { mOpenHelper = new WeatherDbHelper(getContext()); return true; } /* Students: Here's where you'll code the getType function that uses the UriMatcher. You can test this by uncommenting testGetType in TestProvider. */ @Override public String getType(Uri uri) { // Use the Uri Matcher to determine what kind of URI this is. 
final int match = sUriMatcher.match(uri); switch (match) { // Student: Uncomment and fill out these two cases case WEATHER_WITH_LOCATION_AND_DATE: return WeatherContract.WeatherEntry.CONTENT_ITEM_TYPE; case WEATHER_WITH_LOCATION: return WeatherContract.WeatherEntry.CONTENT_TYPE; case WEATHER: return WeatherContract.WeatherEntry.CONTENT_TYPE; case LOCATION: return WeatherContract.LocationEntry.CONTENT_TYPE; default: throw new UnsupportedOperationException("Unknown uri: " + uri); } } @Override public Cursor query(Uri uri, String[] projection, String selection, String[] selectionArgs, String sortOrder) { // Here's the switch statement that, given a URI, will determine what kind of request it is, // and query the database accordingly. Cursor retCursor; switch (sUriMatcher.match(uri)) { // "weather/*/*" case WEATHER_WITH_LOCATION_AND_DATE: retCursor = getWeatherByLocationSettingAndDate(uri, projection, sortOrder); break; // "weather/*" case WEATHER_WITH_LOCATION: retCursor = getWeatherByLocationSetting(uri, projection, sortOrder); break; // "weather" case WEATHER: retCursor = mOpenHelper.getReadableDatabase().query( WeatherContract.WeatherEntry.TABLE_NAME, projection, selection, selectionArgs, null, null, sortOrder ); break; // "location" case LOCATION: retCursor = mOpenHelper.getReadableDatabase().query( WeatherContract.LocationEntry.TABLE_NAME, projection, selection, selectionArgs, null, null, sortOrder ); break; default: throw new UnsupportedOperationException("Unknown uri: " + uri); } retCursor.setNotificationUri(getContext().getContentResolver(), uri); return retCursor; } /* Student: Add the ability to insert Locations to the implementation of this function. 
*/ @Override public Uri insert(Uri uri, ContentValues values) { final SQLiteDatabase db = mOpenHelper.getWritableDatabase(); final int match = sUriMatcher.match(uri); Uri returnUri; switch (match) { case WEATHER: { normalizeDate(values); long _id = db.insert(WeatherContract.WeatherEntry.TABLE_NAME, null, values); if (_id > 0) returnUri = WeatherContract.WeatherEntry.buildWeatherUri(_id); else throw new android.database.SQLException("Failed to insert row into " + uri); break; } case LOCATION: { long _id = db.insert(WeatherContract.LocationEntry.TABLE_NAME, null, values); if (_id > 0) returnUri = WeatherContract.LocationEntry.buildLocationUri(_id); else throw new android.database.SQLException("Failed to insert row into " + uri); break; } default: throw new UnsupportedOperationException("Unknown uri: " + uri); } getContext().getContentResolver().notifyChange(uri, null); db.close(); return returnUri; } @Override public int delete(Uri uri, String selection, String[] selectionArgs) { final SQLiteDatabase db = mOpenHelper.getWritableDatabase(); final int match = sUriMatcher.match(uri); int rowsDeleted; // this makes delete all rows return the number of rows deleted if (null == selection) selection = "1"; switch (match) { case WEATHER: rowsDeleted = db.delete( WeatherContract.WeatherEntry.TABLE_NAME, selection, selectionArgs); break; case LOCATION: rowsDeleted = db.delete( WeatherContract.LocationEntry.TABLE_NAME, selection, selectionArgs); break; default: throw new UnsupportedOperationException("Unknown uri: " + uri); } // Because a null deletes all rows if (rowsDeleted != 0) { getContext().getContentResolver().notifyChange(uri, null); } db.close(); return rowsDeleted; } private void normalizeDate(ContentValues values) { // normalize the date value if (values.containsKey(WeatherContract.WeatherEntry.COLUMN_DATE)) { long dateValue = values.getAsLong(WeatherContract.WeatherEntry.COLUMN_DATE); values.put(WeatherContract.WeatherEntry.COLUMN_DATE, 
WeatherContract.normalizeDate(dateValue)); } } @Override public int update( Uri uri, ContentValues values, String selection, String[] selectionArgs) { final SQLiteDatabase db = mOpenHelper.getWritableDatabase(); final int match = sUriMatcher.match(uri); int rowsUpdated; switch (match) { case WEATHER: normalizeDate(values); rowsUpdated = db.update(WeatherContract.WeatherEntry.TABLE_NAME, values, selection, selectionArgs); break; case LOCATION: rowsUpdated = db.update(WeatherContract.LocationEntry.TABLE_NAME, values, selection, selectionArgs); break; default: throw new UnsupportedOperationException("Unknown uri: " + uri); } if (rowsUpdated != 0) { getContext().getContentResolver().notifyChange(uri, null); } db.close(); return rowsUpdated; } @Override public int bulkInsert(Uri uri, ContentValues[] values) { final SQLiteDatabase db = mOpenHelper.getWritableDatabase(); final int match = sUriMatcher.match(uri); switch (match) { case WEATHER: db.beginTransaction(); int returnCount = 0; try { for (ContentValues value : values) { normalizeDate(value); long _id = db.insert(WeatherContract.WeatherEntry.TABLE_NAME, null, value); if (_id != -1) { returnCount++; } } db.setTransactionSuccessful(); } finally { db.endTransaction(); } getContext().getContentResolver().notifyChange(uri, null); return returnCount; default: return super.bulkInsert(uri, values); } } // You do not need to call this method. This is a method specifically to assist the testing // framework in running smoothly. You can read more at: // http://developer.android.com/reference/android/content/ContentProvider.html#shutdown() @Override @TargetApi(11) public void shutdown() { mOpenHelper.close(); super.shutdown(); } }
apache-2.0
narfindustries/autopsy
Core/src/org/sleuthkit/autopsy/datamodel/BlackboardArtifactNode.java
27314
/*
 * Autopsy Forensic Browser
 *
 * Copyright 2011-2017 Basis Technology Corp.
 * Contact: carrier <at> sleuthkit <dot> org
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.sleuthkit.autopsy.datamodel;

import java.beans.PropertyChangeEvent;
import java.beans.PropertyChangeListener;
import java.text.MessageFormat;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.logging.Level;
import java.util.stream.Collectors;
import javax.swing.Action;
import org.apache.commons.lang3.StringUtils;
import org.openide.nodes.Children;
import org.openide.nodes.Sheet;
import org.openide.util.Lookup;
import org.openide.util.NbBundle;
import org.openide.util.lookup.Lookups;
import org.sleuthkit.autopsy.casemodule.Case;
import org.sleuthkit.autopsy.coreutils.Logger;
import org.sleuthkit.autopsy.coreutils.MessageNotifyUtil;
import org.sleuthkit.autopsy.casemodule.events.BlackBoardArtifactTagAddedEvent;
import org.sleuthkit.autopsy.casemodule.events.BlackBoardArtifactTagDeletedEvent;
import org.sleuthkit.autopsy.casemodule.events.ContentTagAddedEvent;
import org.sleuthkit.autopsy.casemodule.events.ContentTagDeletedEvent;
import static org.sleuthkit.autopsy.datamodel.DisplayableItemNode.findLinked;
import org.sleuthkit.autopsy.timeline.actions.ViewArtifactInTimelineAction;
import org.sleuthkit.autopsy.timeline.actions.ViewFileInTimelineAction;
import org.sleuthkit.datamodel.AbstractFile;
import org.sleuthkit.datamodel.BlackboardArtifact;
import org.sleuthkit.datamodel.BlackboardArtifact.ARTIFACT_TYPE;
import org.sleuthkit.datamodel.BlackboardAttribute;
import org.sleuthkit.datamodel.BlackboardAttribute.ATTRIBUTE_TYPE;
import org.sleuthkit.datamodel.Content;
import org.sleuthkit.datamodel.Tag;
import org.sleuthkit.datamodel.TskCoreException;

/**
 * Node wrapping a blackboard artifact object. This is generated from several
 * places in the tree.
 */
public class BlackboardArtifactNode extends DisplayableItemNode {

    // The artifact this node represents.
    private final BlackboardArtifact artifact;
    // The content (e.g. a file) the artifact is associated with, pulled out
    // of this node's Lookup in the constructors.
    private final Content associated;
    // Extra properties added via addNodeProperty(); lazily created.
    private List<NodeProperty<? extends Object>> customProperties;
    private static final Logger LOGGER = Logger.getLogger(BlackboardArtifactNode.class.getName());

    /*
     * Artifact types which should have the full unique path of the associated
     * content as a property.
     */
    private static final Integer[] SHOW_UNIQUE_PATH = new Integer[]{
        BlackboardArtifact.ARTIFACT_TYPE.TSK_HASHSET_HIT.getTypeID(),
        BlackboardArtifact.ARTIFACT_TYPE.TSK_KEYWORD_HIT.getTypeID(),
        BlackboardArtifact.ARTIFACT_TYPE.TSK_INTERESTING_FILE_HIT.getTypeID(),
        BlackboardArtifact.ARTIFACT_TYPE.TSK_INTERESTING_ARTIFACT_HIT.getTypeID(),};

    // TODO (RC): This is an unattractive alternative to subclassing BlackboardArtifactNode,
    // cut from the same cloth as the equally unattractive SHOW_UNIQUE_PATH array
    // above. It should be removed when and if the subclassing is implemented.
    private static final Integer[] SHOW_FILE_METADATA = new Integer[]{
        BlackboardArtifact.ARTIFACT_TYPE.TSK_INTERESTING_FILE_HIT.getTypeID(),};

    // Refreshes this node's property sheet when tags on the artifact or its
    // associated content change, and detaches itself when the case closes.
    private final PropertyChangeListener pcl = new PropertyChangeListener() {
        @Override
        public void propertyChange(PropertyChangeEvent evt) {
            String eventType = evt.getPropertyName();
            if (eventType.equals(Case.Events.BLACKBOARD_ARTIFACT_TAG_ADDED.toString())) {
                BlackBoardArtifactTagAddedEvent event = (BlackBoardArtifactTagAddedEvent) evt;
                if (event.getAddedTag().getArtifact().equals(artifact)) {
                    updateSheet();
                }
            } else if (eventType.equals(Case.Events.BLACKBOARD_ARTIFACT_TAG_DELETED.toString())) {
                BlackBoardArtifactTagDeletedEvent event = (BlackBoardArtifactTagDeletedEvent) evt;
                if (event.getDeletedTagInfo().getArtifactID() == artifact.getArtifactID()) {
                    updateSheet();
                }
            } else if (eventType.equals(Case.Events.CONTENT_TAG_ADDED.toString())) {
                ContentTagAddedEvent event = (ContentTagAddedEvent) evt;
                if (event.getAddedTag().getContent().equals(associated)) {
                    updateSheet();
                }
            } else if (eventType.equals(Case.Events.CONTENT_TAG_DELETED.toString())) {
                ContentTagDeletedEvent event = (ContentTagDeletedEvent) evt;
                if (event.getDeletedTagInfo().getContentID() == associated.getId()) {
                    updateSheet();
                }
            } else if (eventType.equals(Case.Events.CURRENT_CASE.toString())) {
                if (evt.getNewValue() == null) {
                    // case was closed. Remove listeners so that we don't get called with a stale case handle
                    removeListeners();
                }
            }
        }
    };

    /**
     * Construct blackboard artifact node from an artifact and using provided
     * icon
     *
     * @param artifact artifact to encapsulate
     * @param iconPath icon to use for the artifact
     */
    public BlackboardArtifactNode(BlackboardArtifact artifact, String iconPath) {
        super(Children.LEAF, createLookup(artifact));

        this.artifact = artifact;
        //this.associated = getAssociatedContent(artifact);
        this.associated = this.getLookup().lookup(Content.class);
        this.setName(Long.toString(artifact.getArtifactID()));
        this.setDisplayName();
        this.setIconBaseWithExtension(iconPath);
        Case.addPropertyChangeListener(pcl);
    }

    /**
     * Construct blackboard artifact node from an artifact and using default
     * icon for artifact type
     *
     * @param artifact artifact to encapsulate
     */
    public BlackboardArtifactNode(BlackboardArtifact artifact) {
        super(Children.LEAF, createLookup(artifact));

        this.artifact = artifact;
        //this.associated = getAssociatedContent(artifact);
        this.associated = this.getLookup().lookup(Content.class);
        this.setName(Long.toString(artifact.getArtifactID()));
        this.setDisplayName();
        this.setIconBaseWithExtension(ExtractedContent.getIconFilePath(artifact.getArtifactTypeID())); //NON-NLS
        Case.addPropertyChangeListener(pcl);
    }

    // Stop listening for case events; called when the case closes.
    private void removeListeners() {
        Case.removePropertyChangeListener(pcl);
    }

    /**
     * Builds the context-menu actions for this node: the inherited actions
     * plus timeline actions for the artifact, any linked file, and the
     * associated source file.
     */
    @Override
    @NbBundle.Messages({
        "BlackboardArtifactNode.getAction.errorTitle=Error getting actions",
        "BlackboardArtifactNode.getAction.resultErrorMessage=There was a problem getting actions for the selected result."
        + " The 'View Result in Timeline' action will not be available.",
        "BlackboardArtifactNode.getAction.linkedFileMessage=There was a problem getting actions for the selected result. "
        + " The 'View File in Timeline' action will not be available."})
    public Action[] getActions(boolean context) {
        List<Action> actionsList = new ArrayList<>();
        actionsList.addAll(Arrays.asList(super.getActions(context)));

        //if this artifact has a time stamp add the action to view it in the timeline
        try {
            if (ViewArtifactInTimelineAction.hasSupportedTimeStamp(artifact)) {
                actionsList.add(new ViewArtifactInTimelineAction(artifact));
            }
        } catch (TskCoreException ex) {
            // NOTE(review): "arttribute" typo in the log message below is preserved as-is.
            LOGGER.log(Level.SEVERE, MessageFormat.format("Error getting arttribute(s) from blackboard artifact{0}.", artifact.getArtifactID()), ex); //NON-NLS
            MessageNotifyUtil.Notify.error(Bundle.BlackboardArtifactNode_getAction_errorTitle(), Bundle.BlackboardArtifactNode_getAction_resultErrorMessage());
        }

        // if the artifact links to another file, add an action to go to that file
        try {
            AbstractFile c = findLinked(artifact);
            if (c != null) {
                actionsList.add(ViewFileInTimelineAction.createViewFileAction(c));
            }
        } catch (TskCoreException ex) {
            LOGGER.log(Level.SEVERE, MessageFormat.format("Error getting linked file from blackboard artifact{0}.", artifact.getArtifactID()), ex); //NON-NLS
            MessageNotifyUtil.Notify.error(Bundle.BlackboardArtifactNode_getAction_errorTitle(), Bundle.BlackboardArtifactNode_getAction_linkedFileMessage());
        }

        //if this artifact has associated content, add the action to view the content in the timeline
        AbstractFile file = getLookup().lookup(AbstractFile.class);
        if (null != file) {
            actionsList.add(ViewFileInTimelineAction.createViewSourceFileAction(file));
        }
        return actionsList.toArray(new Action[actionsList.size()]);
    }

    @NbBundle.Messages({"# {0} - artifactDisplayName", "BlackboardArtifactNode.displayName.artifact={0} Artifact"})
    /**
     * Set the filter node display name. The value will either be the file name
     * or something along the lines of e.g. "Messages Artifact" for keyword hits
     * on artifacts.
     */
    private void setDisplayName() {
        String displayName = ""; //NON-NLS
        if (associated != null) {
            displayName = associated.getName();
        }

        // If this is a node for a keyword hit on an artifact, we set the
        // display name to be the artifact type name followed by " Artifact"
        // e.g. "Messages Artifact".
        if (artifact != null
                && (artifact.getArtifactTypeID() == ARTIFACT_TYPE.TSK_KEYWORD_HIT.getTypeID() || artifact.getArtifactTypeID() == ARTIFACT_TYPE.TSK_INTERESTING_ARTIFACT_HIT.getTypeID())) {
            try {
                for (BlackboardAttribute attribute : artifact.getAttributes()) {
                    if (attribute.getAttributeType().getTypeID() == ATTRIBUTE_TYPE.TSK_ASSOCIATED_ARTIFACT.getTypeID()) {
                        BlackboardArtifact associatedArtifact = Case.getCurrentCase().getSleuthkitCase().getBlackboardArtifact(attribute.getValueLong());
                        if (associatedArtifact != null) {
                            if (artifact.getArtifactTypeID() == ARTIFACT_TYPE.TSK_INTERESTING_ARTIFACT_HIT.getTypeID()) {
                                // NOTE(review): the return value of this call is discarded, so
                                // displayName is left as the file name for interesting-artifact
                                // hits. Possibly intended:
                                // displayName = associatedArtifact.getDisplayName(); — TODO confirm.
                                artifact.getDisplayName();
                            } else {
                                displayName = NbBundle.getMessage(BlackboardArtifactNode.class, "BlackboardArtifactNode.displayName.artifact", associatedArtifact.getDisplayName());
                            }
                        }
                    }
                }
            } catch (TskCoreException ex) {
                // Do nothing since the display name will be set to the file name.
            }
        }
        this.setDisplayName(displayName);
    }

    /**
     * Builds the property sheet shown for this node: source-file name,
     * associated-artifact details for interesting-artifact hits, all artifact
     * attributes, custom properties, extension/MIME info for extension
     * mismatches, unique path or data source, and tags.
     */
    @NbBundle.Messages({
        "BlackboardArtifactNode.createSheet.artifactType.displayName=Artifact Type",
        "BlackboardArtifactNode.createSheet.artifactType.name=Artifact Type",
        "BlackboardArtifactNode.createSheet.artifactDetails.displayName=Artifact Details",
        "BlackboardArtifactNode.createSheet.artifactDetails.name=Artifact Details",
        "BlackboardArtifactNode.artifact.displayName=Artifact"})
    @Override
    protected Sheet createSheet() {
        Sheet s = super.createSheet();
        Sheet.Set ss = s.get(Sheet.PROPERTIES);
        if (ss == null) {
            ss = Sheet.createPropertiesSet();
            s.put(ss);
        }
        final String NO_DESCR = NbBundle.getMessage(BlackboardArtifactNode.class, "BlackboardArtifactNode.noDesc.text");

        Map<String, Object> map = new LinkedHashMap<>();
        fillPropertyMap(map, artifact);

        ss.put(new NodeProperty<>(NbBundle.getMessage(BlackboardArtifactNode.class, "BlackboardArtifactNode.createSheet.srcFile.name"),
                NbBundle.getMessage(BlackboardArtifactNode.class, "BlackboardArtifactNode.createSheet.srcFile.displayName"),
                NO_DESCR,
                this.getDisplayName()));

        // For interesting-artifact hits, surface the type and short description
        // of the artifact this hit points at.
        if (artifact.getArtifactTypeID() == ARTIFACT_TYPE.TSK_INTERESTING_ARTIFACT_HIT.getTypeID()) {
            try {
                BlackboardAttribute attribute = artifact.getAttribute(new BlackboardAttribute.Type(ATTRIBUTE_TYPE.TSK_ASSOCIATED_ARTIFACT));
                if (attribute != null) {
                    BlackboardArtifact associatedArtifact = Case.getCurrentCase().getSleuthkitCase().getBlackboardArtifact(attribute.getValueLong());
                    ss.put(new NodeProperty<>(NbBundle.getMessage(BlackboardArtifactNode.class, "BlackboardArtifactNode.createSheet.artifactType.name"),
                            NbBundle.getMessage(BlackboardArtifactNode.class, "BlackboardArtifactNode.createSheet.artifactType.displayName"),
                            NO_DESCR,
                            associatedArtifact.getDisplayName() + " " + NbBundle.getMessage(BlackboardArtifactNode.class, "BlackboardArtifactNode.artifact.displayName")));
                    ss.put(new NodeProperty<>(NbBundle.getMessage(BlackboardArtifactNode.class, "BlackboardArtifactNode.createSheet.artifactDetails.name"),
                            NbBundle.getMessage(BlackboardArtifactNode.class, "BlackboardArtifactNode.createSheet.artifactDetails.displayName"),
                            NO_DESCR,
                            associatedArtifact.getShortDescription()));
                }
            } catch (TskCoreException ex) {
                // Do nothing since the display name will be set to the file name.
            }
        }

        for (Map.Entry<String, Object> entry : map.entrySet()) {
            ss.put(new NodeProperty<>(entry.getKey(), entry.getKey(), NO_DESCR, entry.getValue()));
        }

        //append custom node properties
        if (customProperties != null) {
            for (NodeProperty<? extends Object> np : customProperties) {
                ss.put(np);
            }
        }

        final int artifactTypeId = artifact.getArtifactTypeID();

        // If mismatch, add props for extension and file type
        if (artifactTypeId == BlackboardArtifact.ARTIFACT_TYPE.TSK_EXT_MISMATCH_DETECTED.getTypeID()) {
            String ext = ""; //NON-NLS
            String actualMimeType = ""; //NON-NLS
            if (associated instanceof AbstractFile) {
                AbstractFile af = (AbstractFile) associated;
                ext = af.getNameExtension();
                actualMimeType = af.getMIMEType();
                if (actualMimeType == null) {
                    actualMimeType = ""; //NON-NLS
                }
            }
            ss.put(new NodeProperty<>(NbBundle.getMessage(BlackboardArtifactNode.class, "BlackboardArtifactNode.createSheet.ext.name"),
                    NbBundle.getMessage(BlackboardArtifactNode.class, "BlackboardArtifactNode.createSheet.ext.displayName"),
                    NO_DESCR,
                    ext));
            ss.put(new NodeProperty<>(
                    NbBundle.getMessage(BlackboardArtifactNode.class, "BlackboardArtifactNode.createSheet.mimeType.name"),
                    NbBundle.getMessage(BlackboardArtifactNode.class, "BlackboardArtifactNode.createSheet.mimeType.displayName"),
                    NO_DESCR,
                    actualMimeType));
        }

        // For the artifact types in SHOW_UNIQUE_PATH, show the associated
        // content's unique path (and, for SHOW_FILE_METADATA types, its file
        // times and size); otherwise show the data source name instead.
        if (Arrays.asList(SHOW_UNIQUE_PATH).contains(artifactTypeId)) {
            String sourcePath = ""; //NON-NLS
            try {
                sourcePath = associated.getUniquePath();
            } catch (TskCoreException ex) {
                LOGGER.log(Level.WARNING, "Failed to get unique path from: {0}", associated.getName()); //NON-NLS
            }

            if (sourcePath.isEmpty() == false) {
                ss.put(new NodeProperty<>(
                        NbBundle.getMessage(BlackboardArtifactNode.class, "BlackboardArtifactNode.createSheet.filePath.name"),
                        NbBundle.getMessage(BlackboardArtifactNode.class, "BlackboardArtifactNode.createSheet.filePath.displayName"),
                        NO_DESCR,
                        sourcePath));
            }

            if (Arrays.asList(SHOW_FILE_METADATA).contains(artifactTypeId)) {
                AbstractFile file = associated instanceof AbstractFile ? (AbstractFile) associated : null;
                ss.put(new NodeProperty<>(NbBundle.getMessage(BlackboardArtifactNode.class, "ContentTagNode.createSheet.fileModifiedTime.name"),
                        NbBundle.getMessage(BlackboardArtifactNode.class, "ContentTagNode.createSheet.fileModifiedTime.displayName"),
                        "",
                        file != null ? ContentUtils.getStringTime(file.getMtime(), file) : ""));
                ss.put(new NodeProperty<>(NbBundle.getMessage(BlackboardArtifactNode.class, "ContentTagNode.createSheet.fileChangedTime.name"),
                        NbBundle.getMessage(BlackboardArtifactNode.class, "ContentTagNode.createSheet.fileChangedTime.displayName"),
                        "",
                        file != null ? ContentUtils.getStringTime(file.getCtime(), file) : ""));
                ss.put(new NodeProperty<>(NbBundle.getMessage(BlackboardArtifactNode.class, "ContentTagNode.createSheet.fileAccessedTime.name"),
                        NbBundle.getMessage(BlackboardArtifactNode.class, "ContentTagNode.createSheet.fileAccessedTime.displayName"),
                        "",
                        file != null ? ContentUtils.getStringTime(file.getAtime(), file) : ""));
                ss.put(new NodeProperty<>(NbBundle.getMessage(BlackboardArtifactNode.class, "ContentTagNode.createSheet.fileCreatedTime.name"),
                        NbBundle.getMessage(BlackboardArtifactNode.class, "ContentTagNode.createSheet.fileCreatedTime.displayName"),
                        "",
                        file != null ? ContentUtils.getStringTime(file.getCrtime(), file) : ""));
                ss.put(new NodeProperty<>(NbBundle.getMessage(BlackboardArtifactNode.class, "ContentTagNode.createSheet.fileSize.name"),
                        NbBundle.getMessage(BlackboardArtifactNode.class, "ContentTagNode.createSheet.fileSize.displayName"),
                        "",
                        associated.getSize()));
            }
        } else {
            String dataSourceStr = "";
            try {
                Content dataSource = associated.getDataSource();
                if (dataSource != null) {
                    dataSourceStr = dataSource.getName();
                } else {
                    dataSourceStr = getRootParentName();
                }
            } catch (TskCoreException ex) {
                LOGGER.log(Level.WARNING, "Failed to get image name from {0}", associated.getName()); //NON-NLS
            }

            if (dataSourceStr.isEmpty() == false) {
                ss.put(new NodeProperty<>(
                        NbBundle.getMessage(BlackboardArtifactNode.class, "BlackboardArtifactNode.createSheet.dataSrc.name"),
                        NbBundle.getMessage(BlackboardArtifactNode.class, "BlackboardArtifactNode.createSheet.dataSrc.displayName"),
                        NO_DESCR,
                        dataSourceStr));
            }
        }

        // add properties for tags
        List<Tag> tags = new ArrayList<>();
        try {
            tags.addAll(Case.getCurrentCase().getServices().getTagsManager().getBlackboardArtifactTagsByArtifact(artifact));
            tags.addAll(Case.getCurrentCase().getServices().getTagsManager().getContentTagsByContent(associated));
        } catch (TskCoreException ex) {
            LOGGER.log(Level.SEVERE, "Failed to get tags for artifact " + artifact.getDisplayName(), ex);
        }
        ss.put(new NodeProperty<>("Tags", NbBundle.getMessage(AbstractAbstractFileNode.class, "BlackboardArtifactNode.createSheet.tags.displayName"),
                NO_DESCR, tags.stream().map(t -> t.getName().getDisplayName()).collect(Collectors.joining(", "))));

        return s;
    }

    // Rebuild and replace the property sheet; invoked by the tag-event listener.
    private void updateSheet() {
        this.setSheet(createSheet());
    }

    // Walks up the content hierarchy and returns the topmost ancestor's name,
    // or "" if the walk fails.
    private String getRootParentName() {
        String parentName = associated.getName();
        Content parent = associated;
        try {
            while ((parent = parent.getParent()) != null) {
                parentName = parent.getName();
            }
        } catch (TskCoreException ex) {
            LOGGER.log(Level.WARNING, "Failed to get parent name from {0}", associated.getName()); //NON-NLS
            return "";
        }
        return parentName;
    }

    /**
     * Add an additional custom node property to that node before it is
     * displayed
     *
     * @param np NodeProperty to add
     */
    public void addNodeProperty(NodeProperty<?> np) {
        if (null == customProperties) {
            //lazy create the list
            customProperties = new ArrayList<>();
        }
        customProperties.add(np);
    }

    /**
     * Fill map with Artifact properties
     *
     * @param map      map with preserved ordering, where property names/values
     *                 are put
     * @param artifact to extract properties from
     */
    @SuppressWarnings("deprecation")
    private void fillPropertyMap(Map<String, Object> map, BlackboardArtifact artifact) {
        try {
            for (BlackboardAttribute attribute : artifact.getAttributes()) {
                final int attributeTypeID = attribute.getAttributeType().getTypeID();
                //skip some internal attributes that user shouldn't see
                if (attributeTypeID == ATTRIBUTE_TYPE.TSK_PATH_ID.getTypeID()
                        || attributeTypeID == ATTRIBUTE_TYPE.TSK_TAGGED_ARTIFACT.getTypeID()
                        || attributeTypeID == ATTRIBUTE_TYPE.TSK_ASSOCIATED_ARTIFACT.getTypeID()
                        || attributeTypeID == ATTRIBUTE_TYPE.TSK_SET_NAME.getTypeID()
                        || attributeTypeID == ATTRIBUTE_TYPE.TSK_KEYWORD_SEARCH_TYPE.getTypeID()) {
                    // intentionally empty: these internal attributes are not displayed
                } else if (artifact.getArtifactTypeID() == BlackboardArtifact.ARTIFACT_TYPE.TSK_EMAIL_MSG.getTypeID()) {
                    addEmailMsgProperty (map, attribute);
                } else if (attribute.getAttributeType().getValueType() == BlackboardAttribute.TSK_BLACKBOARD_ATTRIBUTE_VALUE_TYPE.DATETIME) {
                    map.put(attribute.getAttributeType().getDisplayName(), ContentUtils.getStringTime(attribute.getValueLong(), associated));
                } else if (artifact.getArtifactTypeID() == ARTIFACT_TYPE.TSK_TOOL_OUTPUT.getTypeID()
                        && attributeTypeID == ATTRIBUTE_TYPE.TSK_TEXT.getTypeID()) {
                    /*
                     * This was added because the RegRipper output would often
                     * cause the UI to get a black line accross it and hang if
                     * you hovered over large output or selected it. This
                     * reduces the amount of data in the table. Could consider
                     * doing this for all fields in the UI.
                     */
                    String value = attribute.getDisplayString();
                    if (value.length() > 512) {
                        value = value.substring(0, 512);
                    }
                    map.put(attribute.getAttributeType().getDisplayName(), value);
                } else {
                    map.put(attribute.getAttributeType().getDisplayName(), attribute.getDisplayString());
                }
            }
        } catch (TskCoreException ex) {
            LOGGER.log(Level.SEVERE, "Getting attributes failed", ex); //NON-NLS
        }
    }

    /**
     * Fill map with EmailMsg properties, not all attributes are filled
     *
     * @param map       map with preserved ordering, where property names/values
     *                  are put
     * @param attribute attribute to check/fill as property
     */
    private void addEmailMsgProperty(Map<String, Object> map, BlackboardAttribute attribute ) {

        final int attributeTypeID = attribute.getAttributeType().getTypeID();

        // Skip certain Email msg attributes
        if (attributeTypeID == ATTRIBUTE_TYPE.TSK_DATETIME_SENT.getTypeID()
                || attributeTypeID == ATTRIBUTE_TYPE.TSK_EMAIL_CONTENT_HTML.getTypeID()
                || attributeTypeID == ATTRIBUTE_TYPE.TSK_EMAIL_CONTENT_RTF.getTypeID()
                || attributeTypeID == ATTRIBUTE_TYPE.TSK_EMAIL_BCC.getTypeID()
                || attributeTypeID == ATTRIBUTE_TYPE.TSK_EMAIL_CC.getTypeID()
                || attributeTypeID == ATTRIBUTE_TYPE.TSK_HEADERS.getTypeID()
                ) {
            // do nothing
        } else if (attributeTypeID == ATTRIBUTE_TYPE.TSK_EMAIL_CONTENT_PLAIN.getTypeID()) {
            // Truncate long plain-text bodies to keep the table readable.
            String value = attribute.getDisplayString();
            if (value.length() > 160) {
                value = value.substring(0, 160) + "...";
            }
            map.put(attribute.getAttributeType().getDisplayName(), value);
        } else if (attribute.getAttributeType().getValueType() == BlackboardAttribute.TSK_BLACKBOARD_ATTRIBUTE_VALUE_TYPE.DATETIME) {
            map.put(attribute.getAttributeType().getDisplayName(), ContentUtils.getStringTime(attribute.getValueLong(), associated));
        } else {
            map.put(attribute.getAttributeType().getDisplayName(), attribute.getDisplayString());
        }
    }

    @Override
    public <T> T accept(DisplayableItemNodeVisitor<T> v) {
        return v.visit(this);
    }

    /**
     * Create a Lookup based on what is in the passed in artifact.
     *
     * @param artifact the artifact to wrap; always added to the lookup
     *
     * @return a fixed Lookup containing the artifact and, when resolvable, its
     *         associated content
     */
    private static Lookup createLookup(BlackboardArtifact artifact) {
        List<Object> forLookup = new ArrayList<>();
        forLookup.add(artifact);

        // Add the content the artifact is associated with
        Content content = getAssociatedContent(artifact);
        if (content != null) {
            forLookup.add(content);
        }

        return Lookups.fixed(forLookup.toArray(new Object[forLookup.size()]));
    }

    // Resolves the content object this artifact belongs to. On a core
    // exception the failure is logged and an IllegalArgumentException is
    // thrown (note: it never returns null despite the null check in
    // createLookup).
    private static Content getAssociatedContent(BlackboardArtifact artifact) {
        try {
            return artifact.getSleuthkitCase().getContentById(artifact.getObjectID());
        } catch (TskCoreException ex) {
            LOGGER.log(Level.WARNING, "Getting file failed", ex); //NON-NLS
        }
        throw new IllegalArgumentException(
                NbBundle.getMessage(BlackboardArtifactNode.class, "BlackboardArtifactNode.getAssocCont.exception.msg"));
    }

    @Override
    public boolean isLeafTypeNode() {
        return true;
    }

    @Override
    public String getItemType() {
        return getClass().getName();
    }
}
apache-2.0
bazaarvoice/emodb
databus/src/main/java/com/bazaarvoice/emodb/databus/repl/DefaultReplicationSource.java
1617
package com.bazaarvoice.emodb.databus.repl;

import com.bazaarvoice.emodb.databus.core.UpdateRefSerializer;
import com.bazaarvoice.emodb.event.api.EventData;
import com.bazaarvoice.emodb.event.api.EventStore;
import com.bazaarvoice.emodb.sor.core.UpdateRef;
import com.google.common.base.Function;
import com.google.common.collect.Lists;
import com.google.inject.Inject;

import java.util.Collection;
import java.util.List;

import static com.google.common.base.Preconditions.checkArgument;
import static java.util.Objects.requireNonNull;

/**
 * {@link ReplicationSource} backed by a local {@link EventStore}: peeks raw
 * events off a channel and decodes each payload into an {@link UpdateRef}.
 */
public class DefaultReplicationSource implements ReplicationSource {
    private final EventStore _eventStore;

    @Inject
    public DefaultReplicationSource(EventStore eventStore) {
        _eventStore = eventStore;
    }

    /**
     * Peeks up to {@code limit} events from the channel (without consuming
     * them) and wraps each one as a {@link ReplicationEvent}.
     *
     * @param channel the event-store channel to read from; must not be null
     * @param limit   maximum number of events to return; must be &gt; 0
     */
    @Override
    public List<ReplicationEvent> get(String channel, int limit) {
        requireNonNull(channel, "channel");
        checkArgument(limit > 0, "Limit must be >0");

        List<EventData> rawEvents = _eventStore.peek(channel, limit);
        // NOTE: Lists.transform returns a lazy view, so each access to an
        // element re-runs the deserialization below — callers that iterate
        // more than once pay for it each time.
        return Lists.transform(rawEvents, new Function<EventData, ReplicationEvent>() {
            @Override
            public ReplicationEvent apply(EventData rawEvent) {
                UpdateRef ref = UpdateRefSerializer.fromByteBuffer(rawEvent.getData());
                return new ReplicationEvent(rawEvent.getId(), ref);
            }
        });
    }

    /**
     * Acknowledges (deletes) the given events from the channel.
     *
     * @param channel  the event-store channel; must not be null
     * @param eventIds ids of the events to delete; must not be null
     */
    @Override
    public void delete(String channel, Collection<String> eventIds) {
        requireNonNull(channel, "channel");
        requireNonNull(eventIds, "eventIds");

        _eventStore.delete(channel, eventIds, false);
    }
}
apache-2.0
CHStudio/laravel-transclude
src/Exceptions/MissingTranscludeDirective.php
121
<?php

namespace CHStudio\LaravelTransclude\Exceptions;

/**
 * Runtime exception for a missing transclude directive.
 *
 * NOTE(review): summary inferred from the class name — the exact trigger is
 * defined at the throw sites elsewhere in the package; confirm there.
 */
class MissingTranscludeDirective extends \RuntimeException
{
}
apache-2.0
TianYunZi/15springcloud
6.1.1.spring-cloud-feign/src/main/java/com/boot/service/HelloServiceFallback.java
583
package com.boot.service;

import com.boot.model.User;
import org.springframework.stereotype.Component;

/**
 * Fallback implementation of {@link HelloService} used by the circuit
 * breaker: every overload returns a fixed placeholder instead of calling the
 * remote service. (Original author note, translated: created 2017/6/29,
 * "circuit-breaker mechanism".)
 */
@Component
public class HelloServiceFallback implements HelloService {

    /** Placeholder returned by every String-valued fallback method. */
    private static final String FALLBACK_RESULT = "error";

    @Override
    public String hello() {
        return FALLBACK_RESULT;
    }

    @Override
    public String hello(String name) {
        return FALLBACK_RESULT;
    }

    @Override
    public User hello(String name, Integer age) {
        // "未知" ("unknown") with age 0 marks the placeholder user.
        return new User("未知", 0);
    }

    @Override
    public String hello(User user) {
        return FALLBACK_RESULT;
    }
}
apache-2.0
jqno/equalsverifier
equalsverifier-core/src/main/java/nl/jqno/equalsverifier/internal/reflection/ClassAccessor.java
8056
package nl.jqno.equalsverifier.internal.reflection;

import static nl.jqno.equalsverifier.internal.util.Rethrow.rethrow;

import java.lang.reflect.Field;
import java.lang.reflect.Modifier;
import java.util.Set;
import java.util.function.Predicate;
import nl.jqno.equalsverifier.internal.prefabvalues.PrefabValues;
import nl.jqno.equalsverifier.internal.prefabvalues.TypeTag;
import nl.jqno.equalsverifier.internal.reflection.annotations.AnnotationCache;
import nl.jqno.equalsverifier.internal.reflection.annotations.NonnullAnnotationVerifier;

/**
 * Instantiates and populates objects of a given class. {@link ClassAccessor} can create two
 * different instances of T, which are guaranteed not to be equal to each other, and which contain
 * no null values.
 *
 * @param <T> A class.
 */
public class ClassAccessor<T> {

    private final Class<T> type;
    private final PrefabValues prefabValues;

    /** Package-private constructor. Call {@link #of(Class, PrefabValues)} instead. */
    ClassAccessor(Class<T> type, PrefabValues prefabValues) {
        this.type = type;
        this.prefabValues = prefabValues;
    }

    /**
     * Factory method.
     *
     * @param <T> The class on which {@link ClassAccessor} operates.
     * @param type The class on which {@link ClassAccessor} operates. Should be the same as T.
     * @param prefabValues Prefabricated values with which to fill instantiated objects.
     * @return A {@link ClassAccessor} for T.
     */
    public static <T> ClassAccessor<T> of(Class<T> type, PrefabValues prefabValues) {
        return new ClassAccessor<>(type, prefabValues);
    }

    /** @return The class on which {@link ClassAccessor} operates. */
    public Class<T> getType() {
        return type;
    }

    /**
     * Determines whether T is a Java Record.
     *
     * @return true if T is a Java Record.
     */
    public boolean isRecord() {
        return RecordsHelper.isRecord(type);
    }

    /**
     * Determines whether T is a sealed class.
     *
     * @return true if T is a sealed class
     */
    public boolean isSealed() {
        return SealedClassesHelper.isSealed(type);
    }

    /**
     * Determines whether T declares a field. This does not include inherited fields.
     *
     * @param field The field that we want to detect.
     * @return True if T declares the field.
     */
    public boolean declaresField(Field field) {
        try {
            type.getDeclaredField(field.getName());
            return true;
        } catch (NoSuchFieldException e) {
            return false;
        }
    }

    /**
     * Determines whether T has an {@code equals} method.
     *
     * @return True if T has an {@code equals} method.
     */
    public boolean declaresEquals() {
        return declaresMethod("equals", Object.class);
    }

    /**
     * Determines whether T has an {@code hashCode} method.
     *
     * @return True if T has an {@code hashCode} method.
     */
    public boolean declaresHashCode() {
        return declaresMethod("hashCode");
    }

    // True if T itself (not a superclass) declares a method with this
    // exact signature.
    private boolean declaresMethod(String name, Class<?>... parameterTypes) {
        try {
            type.getDeclaredMethod(name, parameterTypes);
            return true;
        } catch (NoSuchMethodException e) {
            return false;
        }
    }

    /**
     * Determines whether T's {@code equals} method is abstract.
     *
     * @return True if T's {@code equals} method is abstract.
     */
    public boolean isEqualsAbstract() {
        return isMethodAbstract("equals", Object.class);
    }

    /**
     * Determines whether T's {@code hashCode} method is abstract.
     *
     * @return True if T's {@code hashCode} method is abstract.
     */
    public boolean isHashCodeAbstract() {
        return isMethodAbstract("hashCode");
    }

    // Uses getMethod (public members, including inherited ones); any
    // checked reflection exception is rethrown unchecked via rethrow().
    private boolean isMethodAbstract(String name, Class<?>... parameterTypes) {
        return rethrow(() ->
            Modifier.isAbstract(type.getMethod(name, parameterTypes).getModifiers())
        );
    }

    /**
     * Determines whether T's {@code equals} method is inherited from {@link Object}.
     *
     * @return true if T's {@code equals} method is inherited from {@link Object}; false if it is
     *     overridden in T or in any of its superclasses (except {@link Object}).
     */
    public boolean isEqualsInheritedFromObject() {
        // Walk up the hierarchy; any concrete equals override below Object
        // means it's not inherited from Object.
        ClassAccessor<? super T> i = this;
        while (i.getType() != Object.class) {
            if (i.declaresEquals() && !i.isEqualsAbstract()) {
                return false;
            }
            i = i.getSuperAccessor();
        }
        return true;
    }

    /**
     * Returns an accessor for T's superclass.
     *
     * @return An accessor for T's superclass.
     */
    public ClassAccessor<? super T> getSuperAccessor() {
        return ClassAccessor.of(type.getSuperclass(), prefabValues);
    }

    /**
     * Returns an instance of T that is not equal to the instance of T returned by {@link
     * #getBlueObject(TypeTag)}.
     *
     * @param enclosingType Describes the type that contains this object as a field, to determine
     *     any generic parameters it may contain.
     * @return An instance of T.
     */
    public T getRedObject(TypeTag enclosingType) {
        return getRedAccessor(enclosingType).get();
    }

    /**
     * Returns an {@link ObjectAccessor} for {@link #getRedObject(TypeTag)}.
     *
     * @param enclosingType Describes the type that contains this object as a field, to determine
     *     any generic parameters it may contain.
     * @return An {@link ObjectAccessor} for {@link #getRedObject(TypeTag)}.
     */
    public ObjectAccessor<T> getRedAccessor(TypeTag enclosingType) {
        // One scramble pass = "red" state.
        return buildObjectAccessor().scramble(prefabValues, enclosingType);
    }

    /**
     * Returns an instance of T that is not equal to the instance of T returned by {@link
     * #getRedObject(TypeTag)}.
     *
     * @param enclosingType Describes the type that contains this object as a field, to determine
     *     any generic parameters it may contain.
     * @return An instance of T.
     */
    public T getBlueObject(TypeTag enclosingType) {
        return getBlueAccessor(enclosingType).get();
    }

    /**
     * Returns an {@link ObjectAccessor} for {@link #getBlueObject(TypeTag)}.
     *
     * @param enclosingType Describes the type that contains this object as a field, to determine
     *     any generic parameters it may contain.
     * @return An {@link ObjectAccessor} for {@link #getBlueObject(TypeTag)}.
     */
    public ObjectAccessor<T> getBlueAccessor(TypeTag enclosingType) {
        // Two scramble passes = "blue" state, distinct from "red".
        return buildObjectAccessor()
            .scramble(prefabValues, enclosingType)
            .scramble(prefabValues, enclosingType);
    }

    /**
     * Returns an {@link ObjectAccessor} for an instance of T where all the fields are initialized
     * to their default values. I.e., 0 for ints, and null for objects (except when the field is
     * marked with a NonNull annotation).
     *
     * @param enclosingType Describes the type that contains this object as a field, to determine
     *     any generic parameters it may contain.
     * @param nonnullFields Fields which are not allowed to be set to null.
     * @param annotationCache To check for any NonNull annotations.
     * @return An {@link ObjectAccessor} for an instance of T where all the fields are initialized
     *     to their default values.
     */
    public ObjectAccessor<T> getDefaultValuesAccessor(
        TypeTag enclosingType,
        Set<String> nonnullFields,
        AnnotationCache annotationCache
    ) {
        // A field may be defaulted only if it is neither annotated @Nonnull
        // nor explicitly listed as non-null.
        Predicate<Field> canBeDefault = f ->
            !NonnullAnnotationVerifier.fieldIsNonnull(f, annotationCache) &&
            !nonnullFields.contains(f.getName());
        return buildObjectAccessor().clear(canBeDefault, prefabValues, enclosingType);
    }

    // Instantiates a fresh T (bypassing constructors via Instantiator) and
    // wraps it in an ObjectAccessor.
    private ObjectAccessor<T> buildObjectAccessor() {
        T object = Instantiator.of(type).instantiate();
        return ObjectAccessor.of(object);
    }
}
apache-2.0
Arunk0548/PatternCode
src/P10/README.md
825
# PatternCode Java Pattern Programs ~~~ Takes an integer input N and draws the pattern below. For example, the following pattern is drawn for the input number 5. A B B B C C C C C D D D D D D D E E E E E E E E E ~~~ ~~~ import java.io.*; import java.util.*; public class Pattern { public static void main(String args[] ) throws Exception { System.out.println("Enter an integer number n [where N>=1, N<=26]"); Scanner s = new Scanner(System.in); int inputNum = Integer.parseInt(s.nextLine()); int tempNum = 1; char printChar = 'A'; while(inputNum>0) { for(int index =0; index<inputNum - 1;index++) System.out.print(" "); for(int index = 1;index<tempNum ;index++) System.out.print(printChar + " "); System.out.println(printChar); tempNum+=2; printChar++; inputNum--; } } } ~~~
apache-2.0
spaetzel/spaetzel.github.com
_posts/2005-12-20-university-of-western-ontario-tops-on-technorati-saugeen-stripper.html
1982
--- wordpress_id: 79 title: University of Western Ontario tops on Technorati (Saugeen Stripper) wordpress_url: http://spaetzel.com/PermaLink,guid,557.aspx layout: post --- I was reading <a href="http://onlyinevitable.blogspot.com/2005/12/geen-beats-bush.html">this blog post</a> and saw that she noticed that the word <a href="http://technorati.com/search/saugeen">Saugeen</a> topped <a href="http://technorati.com/search/bush">Bush</a> as a popular search on <a href="http://technorati.com">Technorati</a>. I headed over to technorati myself and saw this:<br /> <br /> <a href="http://redune.com/system/files?file=technorati-uwo_l.jpg"><img src="http://redune.com/system/files?file=technorati-uwo_m.jpg"></a> <br /> <a href="http://technorati.com/search/university%20of%20western%20ontario">The University Of Western Ontario</a> is the #7 search this hour on Technorati. Wow, this Saugeen Stripper thing is getting worldwide attention now. There have been news articles in the <a href="http://www.thestar.com/NASApp/cs/ContentServer?pagename=thestar/Layout/Article_Type1&c=Article&cid=1134427836448&call_pageid=968332188492&col=968793972154">Toronto Star</a> and the Toronto Sun, and now posts are appearing in blogs that have nothing to do with UWO. Myself, I attend Western and have spent a lot of time in Saugeen. I certainly am not surprised at all that something like this occurred in that residence. And if people want to take their clothes off and have photos shared worldwide, I say go ahead, do what you want. If you haven't seen the photos yet, they are <a href="http://avrilrulez.buzznet.com/user/archive/">here </a> <strong>NSFW</strong>.<br /> <br /> <a href="http://technorati.com/search/university%20of%20western%20ontario">Link</a><img width="0" height="0" src="http://spaetzel.com/aggbug.ashx?id=557" />
apache-2.0
drlebedev/nd4j
nd4j-backends/nd4j-api-parent/nd4j-api/src/main/java/org/nd4j/linalg/api/ndarray/BaseShapeInfoProvider.java
475
package org.nd4j.linalg.api.ndarray;

import org.nd4j.linalg.api.buffer.DataBuffer;
import org.nd4j.linalg.api.shape.Shape;

/**
 * Base {@link ShapeInfoProvider} that builds shape-information buffers by
 * delegating straight to {@link Shape#createShapeInformation}.
 *
 * @author [email protected]
 */
public abstract class BaseShapeInfoProvider implements ShapeInfoProvider {

    /**
     * Creates the shape-information {@link DataBuffer} for the given array
     * descriptor by delegating to
     * {@link Shape#createShapeInformation(int[], int[], int, int, char)}.
     *
     * @param shape             array shape
     * @param stride            array strides
     * @param offset            buffer offset
     * @param elementWiseStride element-wise stride
     * @param order             ordering ('c' or 'f')
     * @return the assembled shape-information buffer
     */
    @Override
    public DataBuffer createShapeInformation(int[] shape, int[] stride, int offset, int elementWiseStride, char order) {
        return Shape.createShapeInformation(shape, stride, offset, elementWiseStride, order);
    }
}
apache-2.0
4front/apphost
test/plugin.client-settings.js
940
var clientSettings = require('../lib/plugins/client-settings'); var express = require('express'); var supertest = require('supertest'); var assert = require('assert'); describe('logout()', function() { var server; var clientConfigOptions; beforeEach(function() { server = express(); server.use(function(req, res, next) { req.ext = {clientConfig: {}}; next(); }); server.use(function(req, res, next) { clientSettings(clientConfigOptions)(req, res, next); }); server.get('/', function(req, res, next) { res.json(req.ext.clientConfig); }); }); it('should redirect to index page', function(done) { clientConfigOptions = { option1: 'foo', option2: { name: 'joe'} }; supertest(server) .get('/') .expect(200) .expect(function(res) { assert.deepEqual(res.body.settings, clientConfigOptions); }) .end(done); }); });
apache-2.0
koopaworks/polymer-gh-issues
index.js
510
'use strict'; var express = require('express'); var app = express(); app.use('/components/gh-issues', express.static( __dirname)); app.use('/components', express.static(__dirname + '/bower_components')); app.get('/', function(req, res){ res.redirect('/components/gh-issues/'); }); app.get('/hello', function (req, res) { res.status(200).send('Hello, world!'); }); var server = app.listen(process.env.PORT || '8080', function () { console.log('App listening on port %s', server.address().port); });
apache-2.0
Cognifide/bobcat
bb-junit5/src/main/java/com/cognifide/qa/bb/junit5/JUnit5Constants.java
1013
/*- * #%L * Bobcat * %% * Copyright (C) 2018 Cognifide Ltd. * %% * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * #L% */ package com.cognifide.qa.bb.junit5; import org.junit.jupiter.api.extension.ExtensionContext.Namespace; /** * Contains common constants for the whole JUnit 5 module. */ public final class JUnit5Constants { public static final Namespace NAMESPACE = Namespace.create("com", "cognifide", "qa", "bb", "junit", "guice"); private JUnit5Constants() { //util } }
apache-2.0
dcarbone/php-fhir-generated
src/DCarbone/PHPFHIRGenerated/R4/PHPFHIRTests/FHIRCodePrimitive/FHIRCarePlanActivityStatusListTest.php
3509
<?php namespace DCarbone\PHPFHIRGenerated\R4\PHPFHIRTests\FHIRCodePrimitive; /*! * This class was generated with the PHPFHIR library (https://github.com/dcarbone/php-fhir) using * class definitions from HL7 FHIR (https://www.hl7.org/fhir/) * * Class creation date: December 26th, 2019 15:44+0000 * * PHPFHIR Copyright: * * Copyright 2016-2019 Daniel Carbone ([email protected]) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * * FHIR Copyright Notice: * * Copyright (c) 2011+, HL7, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * Neither the name of HL7 nor the names of its contributors may be used to * endorse or promote products derived from this software without specific * prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
* IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. * * * Generated on Fri, Nov 1, 2019 09:29+1100 for FHIR v4.0.1 * * Note: the schemas & schematrons do not contain all of the rules about what makes resources * valid. Implementers will still need to be familiar with the content of the specification and with * any profiles that apply to the resources in order to make a conformant implementation. * */ use PHPUnit\Framework\TestCase; use DCarbone\PHPFHIRGenerated\R4\FHIRCodePrimitive\FHIRCarePlanActivityStatusList; /** * Class FHIRCarePlanActivityStatusListTest * @package \DCarbone\PHPFHIRGenerated\R4\PHPFHIRTests\FHIRCodePrimitive */ class FHIRCarePlanActivityStatusListTest extends TestCase { public function testCanConstructTypeNoArgs() { $type = new FHIRCarePlanActivityStatusList(); $this->assertInstanceOf('\DCarbone\PHPFHIRGenerated\R4\FHIRCodePrimitive\FHIRCarePlanActivityStatusList', $type); } }
apache-2.0
kujian/frontendDaily
2019/06/04.md
3880
# [2019-06-04 精选博文推荐](http://hao.caibaojian.com/date/2019/06/04) [前端日报](http://caibaojian.com/c/news)栏目数据来自[码农头条](http://hao.caibaojian.com/)(我开发的爬虫),每日分享前端、移动开发、设计、资源和资讯等,为开发者提供动力,点击Star按钮来关注这个项目,点击Watch来收听每日的更新[Github主页](https://github.com/kujian/frontendDaily) * [CSS @font-face 性能优化](http://hao.caibaojian.com/103271.html) (奇舞团) * [Webpack5.0 新特性尝鲜实战 🦀🦀](http://hao.caibaojian.com/103595.html) (奇舞团) * [Git 自救指南](http://hao.caibaojian.com/113643.html) (SegmentFault) * [从零实现一个 Webpack Loader](http://hao.caibaojian.com/113720.html) (推酷网) * [【JavaScript】数据结构与内存中的堆和栈](http://hao.caibaojian.com/113709.html) (推酷网) *** * [前端进阶之 let、const、var](http://hao.caibaojian.com/113721.html) (推酷网) * [jQuery源码解析之trigger()](http://hao.caibaojian.com/113704.html) (推酷网) * [Vue响应式原理-理解Observer、Dep、Watcher](http://hao.caibaojian.com/113715.html) (推酷网) * [公司要求会使用框架vue,面试题会被问及哪些?](http://hao.caibaojian.com/113719.html) (推酷网) * [JavaScript进阶系列-原型继承与原型链](http://hao.caibaojian.com/113700.html) (推酷网) *** * [JS数组的数据结构](http://hao.caibaojian.com/113716.html) (推酷网) * [六种组织CSS的方式](http://hao.caibaojian.com/113641.html) (SegmentFault) * [手把手教你做一个APP应用(含源码)](http://hao.caibaojian.com/113711.html) (推酷网) * [前端培训-初级阶段-场景实战(2019-06-06)-Content-Type对照表及日常使用](http://hao.caibaojian.com/113654.html) (SegmentFault) * [那些年,我们一起踩过的坑(前端防翻车指南)](http://hao.caibaojian.com/113701.html) (推酷网) *** * [重回前端之Class](http://hao.caibaojian.com/113815.html) (推酷网) * [配置Webpack4支持ES6/TypeScript/异步文件引用加载](http://hao.caibaojian.com/113714.html) (推酷网) * [BAT 高频面试题:寻找两个有序数组的中位数](http://hao.caibaojian.com/113691.html) (开发者头条) * [异步编程(1):Promise](http://hao.caibaojian.com/113705.html) (推酷网) * [重学前端(8)封装ajax,http,跨域问题](http://hao.caibaojian.com/113805.html) (推酷网) *** * [&lt;link&gt;标签的几个用法,帮助提高页面性能](http://hao.caibaojian.com/113650.html) (SegmentFault) * [Golang_Puzzlers 项目的小更新](http://hao.caibaojian.com/113683.html) (开发者头条) * [乌合之众:群体心理](http://hao.caibaojian.com/113661.html) (开发者头条) * [Vue核心50讲 | 第四回:Vue 
官方赠送的橙色装备,岂能不要](http://hao.caibaojian.com/113697.html) (推酷网) * [谈谈JavaScript中装箱和拆箱](http://hao.caibaojian.com/113796.html) (推酷网) *** * [移动端跨平台方案如何选择](http://hao.caibaojian.com/113300.html) (奇舞团) * [资源混淆是如何影响到 Kotlin 协程的](http://hao.caibaojian.com/113672.html) (开发者头条) * [前端框架技术选型 React vs. Vue (vs. Angular)](http://hao.caibaojian.com/113708.html) (推酷网) * [最全的Eslint配置模板,从此统一团队的编程习](http://hao.caibaojian.com/113807.html) (推酷网) * [精读《What&#8217;s new in javascript》](http://hao.caibaojian.com/113651.html) (SegmentFault) 日报维护作者:[前端博客](http://caibaojian.com/) 和 [微博](http://caibaojian.com/go/weibo) ![weixin](https://user-images.githubusercontent.com/3055447/38468989-651132ac-3b80-11e8-8e6b-15122322a9d7.png)
apache-2.0
Banno/polymer-lint
spec/lib/CLISpec.js
2623
const CLI = require('CLI'); describe('CLI', () => { function args(...arr) { return [ 'node', 'polymer-lint.js', ...arr ]; } let Options, Linter; const filenames = [ './spec/integration/good-component.html', './spec/integration/bad-component.html', ]; beforeEach(() => { Options = require('Options'); spyOn(console, 'log'); }); describe('execute', () => { describe('with no arguments', () => { it('displays help', () => { spyOn(Options, 'generateHelp').and.returnValue('Help'); CLI.execute(args('--help')); expect(Options.generateHelp).toHaveBeenCalled(); expect(console.log).toHaveBeenCalledWith('Help'); }); }); describe('with filename arguments', () => { let mockPromise; beforeEach(() => { mockPromise = jasmine.createSpyObj('promise', ['then']); Linter = require('Linter'); spyOn(Linter, 'lintFiles').and.returnValue(mockPromise); }); it('calls Linter.lintFiles with the given filenames', () => { CLI.execute(args(...filenames)); expect(Linter.lintFiles).toHaveBeenCalledWith( filenames, jasmine.objectContaining({ _: filenames })); expect(mockPromise.then).toHaveBeenCalledWith(jasmine.any(Function)); }); describe('and --rules', () => { it('calls Linter.lintFiles with the expected `rules` option', () => { const ruleNames = ['no-missing-import', 'no-unused-import']; CLI.execute(args('--rules', ruleNames.join(','), ...filenames)); expect(Linter.lintFiles).toHaveBeenCalledTimes(1); const [ actualFilenames, { rules: actualRules } ] = Linter.lintFiles.calls.argsFor(0); expect(actualFilenames).toEqual(filenames); expect(actualRules).toEqual(ruleNames); expect(mockPromise.then).toHaveBeenCalledWith(jasmine.any(Function)); }); }); }); describe('with --help', () => { it('displays help', () => { spyOn(Options, 'generateHelp').and.returnValue('Help'); CLI.execute(args('--help')); expect(Options.generateHelp).toHaveBeenCalled(); expect(console.log).toHaveBeenCalledWith('Help'); }); }); describe('with --version', () => { it('prints the version number', () => { 
CLI.execute(args('--version')); const expectedVersion = `v${require('../../package.json').version}`; expect(console.log).toHaveBeenCalledWith(expectedVersion); }); }); describe('with --color', () => {}); describe('with --no-color', () => {}); }); });
apache-2.0
moreus/hadoop
hadoop-0.10.0/docs/api/org/apache/hadoop/io/package-tree.html
22245
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd"> <!--NewPage--> <HTML> <HEAD> <!-- Generated by javadoc (build 1.5.0_08) on Fri Jan 05 15:06:07 PST 2007 --> <TITLE> org.apache.hadoop.io Class Hierarchy (Hadoop 0.10.0 API) </TITLE> <LINK REL ="stylesheet" TYPE="text/css" HREF="../../../../stylesheet.css" TITLE="Style"> <SCRIPT type="text/javascript"> function windowTitle() { parent.document.title="org.apache.hadoop.io Class Hierarchy (Hadoop 0.10.0 API)"; } </SCRIPT> <NOSCRIPT> </NOSCRIPT> </HEAD> <BODY BGCOLOR="white" onload="windowTitle();"> <!-- ========= START OF TOP NAVBAR ======= --> <A NAME="navbar_top"><!-- --></A> <A HREF="#skip-navbar_top" title="Skip navigation links"></A> <TABLE BORDER="0" WIDTH="100%" CELLPADDING="1" CELLSPACING="0" SUMMARY=""> <TR> <TD COLSPAN=2 BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A NAME="navbar_top_firstrow"><!-- --></A> <TABLE BORDER="0" CELLPADDING="0" CELLSPACING="3" SUMMARY=""> <TR ALIGN="center" VALIGN="top"> <TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../overview-summary.html"><FONT CLASS="NavBarFont1"><B>Overview</B></FONT></A>&nbsp;</TD> <TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="package-summary.html"><FONT CLASS="NavBarFont1"><B>Package</B></FONT></A>&nbsp;</TD> <TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <FONT CLASS="NavBarFont1">Class</FONT>&nbsp;</TD> <TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <FONT CLASS="NavBarFont1">Use</FONT>&nbsp;</TD> <TD BGCOLOR="#FFFFFF" CLASS="NavBarCell1Rev"> &nbsp;<FONT CLASS="NavBarFont1Rev"><B>Tree</B></FONT>&nbsp;</TD> <TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../deprecated-list.html"><FONT CLASS="NavBarFont1"><B>Deprecated</B></FONT></A>&nbsp;</TD> <TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../index-all.html"><FONT CLASS="NavBarFont1"><B>Index</B></FONT></A>&nbsp;</TD> <TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../help-doc.html"><FONT 
CLASS="NavBarFont1"><B>Help</B></FONT></A>&nbsp;</TD> </TR> </TABLE> </TD> <TD ALIGN="right" VALIGN="top" ROWSPAN=3><EM> </EM> </TD> </TR> <TR> <TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2"> &nbsp;<A HREF="../../../../org/apache/hadoop/fs/s3/package-tree.html"><B>PREV</B></A>&nbsp; &nbsp;<A HREF="../../../../org/apache/hadoop/io/compress/package-tree.html"><B>NEXT</B></A></FONT></TD> <TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2"> <A HREF="../../../../index.html?org/apache/hadoop/io/package-tree.html" target="_top"><B>FRAMES</B></A> &nbsp; &nbsp;<A HREF="package-tree.html" target="_top"><B>NO FRAMES</B></A> &nbsp; &nbsp;<SCRIPT type="text/javascript"> <!-- if(window==top) { document.writeln('<A HREF="../../../../allclasses-noframe.html"><B>All Classes</B></A>'); } //--> </SCRIPT> <NOSCRIPT> <A HREF="../../../../allclasses-noframe.html"><B>All Classes</B></A> </NOSCRIPT> </FONT></TD> </TR> </TABLE> <A NAME="skip-navbar_top"></A> <!-- ========= END OF TOP NAVBAR ========= --> <HR> <CENTER> <H2> Hierarchy For Package org.apache.hadoop.io </H2> </CENTER> <DL> <DT><B>Package Hierarchies:</B><DD><A HREF="../../../../overview-tree.html">All Packages</A></DL> <HR> <H2> Class Hierarchy </H2> <UL> <LI TYPE="circle">java.lang.<A HREF="http://java.sun.com/j2se/1.5/docs/api/java/lang/Object.html" title="class or interface in java.lang"><B>Object</B></A><UL> <LI TYPE="circle">org.apache.hadoop.io.<A HREF="../../../../org/apache/hadoop/io/ArrayWritable.html" title="class in org.apache.hadoop.io"><B>ArrayWritable</B></A> (implements org.apache.hadoop.io.<A HREF="../../../../org/apache/hadoop/io/Writable.html" title="interface in org.apache.hadoop.io">Writable</A>) <LI TYPE="circle">org.apache.hadoop.io.<A HREF="../../../../org/apache/hadoop/io/BooleanWritable.html" title="class in org.apache.hadoop.io"><B>BooleanWritable</B></A> (implements org.apache.hadoop.io.<A HREF="../../../../org/apache/hadoop/io/WritableComparable.html" title="interface in 
org.apache.hadoop.io">WritableComparable</A>) <LI TYPE="circle">org.apache.hadoop.io.<A HREF="../../../../org/apache/hadoop/io/BytesWritable.html" title="class in org.apache.hadoop.io"><B>BytesWritable</B></A> (implements org.apache.hadoop.io.<A HREF="../../../../org/apache/hadoop/io/WritableComparable.html" title="interface in org.apache.hadoop.io">WritableComparable</A>) <LI TYPE="circle">org.apache.hadoop.io.<A HREF="../../../../org/apache/hadoop/io/CompressedWritable.html" title="class in org.apache.hadoop.io"><B>CompressedWritable</B></A> (implements org.apache.hadoop.io.<A HREF="../../../../org/apache/hadoop/io/Writable.html" title="interface in org.apache.hadoop.io">Writable</A>) <LI TYPE="circle">org.apache.hadoop.io.<A HREF="../../../../org/apache/hadoop/io/FloatWritable.html" title="class in org.apache.hadoop.io"><B>FloatWritable</B></A> (implements org.apache.hadoop.io.<A HREF="../../../../org/apache/hadoop/io/WritableComparable.html" title="interface in org.apache.hadoop.io">WritableComparable</A>) <LI TYPE="circle">org.apache.hadoop.io.<A HREF="../../../../org/apache/hadoop/io/GenericWritable.html" title="class in org.apache.hadoop.io"><B>GenericWritable</B></A> (implements org.apache.hadoop.io.<A HREF="../../../../org/apache/hadoop/io/Writable.html" title="interface in org.apache.hadoop.io">Writable</A>) <LI TYPE="circle">java.io.<A HREF="http://java.sun.com/j2se/1.5/docs/api/java/io/InputStream.html" title="class or interface in java.io"><B>InputStream</B></A> (implements java.io.<A HREF="http://java.sun.com/j2se/1.5/docs/api/java/io/Closeable.html" title="class or interface in java.io">Closeable</A>) <UL> <LI TYPE="circle">java.io.<A HREF="http://java.sun.com/j2se/1.5/docs/api/java/io/FilterInputStream.html" title="class or interface in java.io"><B>FilterInputStream</B></A><UL> <LI TYPE="circle">java.io.<A HREF="http://java.sun.com/j2se/1.5/docs/api/java/io/DataInputStream.html" title="class or interface in java.io"><B>DataInputStream</B></A> 
(implements java.io.<A HREF="http://java.sun.com/j2se/1.5/docs/api/java/io/DataInput.html" title="class or interface in java.io">DataInput</A>) <UL> <LI TYPE="circle">org.apache.hadoop.io.<A HREF="../../../../org/apache/hadoop/io/DataInputBuffer.html" title="class in org.apache.hadoop.io"><B>DataInputBuffer</B></A></UL> </UL> </UL> <LI TYPE="circle">org.apache.hadoop.io.<A HREF="../../../../org/apache/hadoop/io/IntWritable.html" title="class in org.apache.hadoop.io"><B>IntWritable</B></A> (implements org.apache.hadoop.io.<A HREF="../../../../org/apache/hadoop/io/WritableComparable.html" title="interface in org.apache.hadoop.io">WritableComparable</A>) <LI TYPE="circle">org.apache.hadoop.io.<A HREF="../../../../org/apache/hadoop/io/LongWritable.html" title="class in org.apache.hadoop.io"><B>LongWritable</B></A> (implements org.apache.hadoop.io.<A HREF="../../../../org/apache/hadoop/io/WritableComparable.html" title="interface in org.apache.hadoop.io">WritableComparable</A>) <LI TYPE="circle">org.apache.hadoop.io.<A HREF="../../../../org/apache/hadoop/io/MapFile.html" title="class in org.apache.hadoop.io"><B>MapFile</B></A><UL> <LI TYPE="circle">org.apache.hadoop.io.<A HREF="../../../../org/apache/hadoop/io/ArrayFile.html" title="class in org.apache.hadoop.io"><B>ArrayFile</B></A><LI TYPE="circle">org.apache.hadoop.io.<A HREF="../../../../org/apache/hadoop/io/SetFile.html" title="class in org.apache.hadoop.io"><B>SetFile</B></A></UL> <LI TYPE="circle">org.apache.hadoop.io.<A HREF="../../../../org/apache/hadoop/io/MapFile.Reader.html" title="class in org.apache.hadoop.io"><B>MapFile.Reader</B></A><UL> <LI TYPE="circle">org.apache.hadoop.io.<A HREF="../../../../org/apache/hadoop/io/ArrayFile.Reader.html" title="class in org.apache.hadoop.io"><B>ArrayFile.Reader</B></A><LI TYPE="circle">org.apache.hadoop.io.<A HREF="../../../../org/apache/hadoop/io/SetFile.Reader.html" title="class in org.apache.hadoop.io"><B>SetFile.Reader</B></A></UL> <LI 
TYPE="circle">org.apache.hadoop.io.<A HREF="../../../../org/apache/hadoop/io/MapFile.Writer.html" title="class in org.apache.hadoop.io"><B>MapFile.Writer</B></A><UL> <LI TYPE="circle">org.apache.hadoop.io.<A HREF="../../../../org/apache/hadoop/io/ArrayFile.Writer.html" title="class in org.apache.hadoop.io"><B>ArrayFile.Writer</B></A><LI TYPE="circle">org.apache.hadoop.io.<A HREF="../../../../org/apache/hadoop/io/SetFile.Writer.html" title="class in org.apache.hadoop.io"><B>SetFile.Writer</B></A></UL> <LI TYPE="circle">org.apache.hadoop.io.<A HREF="../../../../org/apache/hadoop/io/MD5Hash.html" title="class in org.apache.hadoop.io"><B>MD5Hash</B></A> (implements org.apache.hadoop.io.<A HREF="../../../../org/apache/hadoop/io/WritableComparable.html" title="interface in org.apache.hadoop.io">WritableComparable</A>) <LI TYPE="circle">org.apache.hadoop.io.<A HREF="../../../../org/apache/hadoop/io/NullWritable.html" title="class in org.apache.hadoop.io"><B>NullWritable</B></A> (implements org.apache.hadoop.io.<A HREF="../../../../org/apache/hadoop/io/Writable.html" title="interface in org.apache.hadoop.io">Writable</A>) <LI TYPE="circle">org.apache.hadoop.io.<A HREF="../../../../org/apache/hadoop/io/ObjectWritable.html" title="class in org.apache.hadoop.io"><B>ObjectWritable</B></A> (implements org.apache.hadoop.conf.<A HREF="../../../../org/apache/hadoop/conf/Configurable.html" title="interface in org.apache.hadoop.conf">Configurable</A>, org.apache.hadoop.io.<A HREF="../../../../org/apache/hadoop/io/Writable.html" title="interface in org.apache.hadoop.io">Writable</A>) <LI TYPE="circle">java.io.<A HREF="http://java.sun.com/j2se/1.5/docs/api/java/io/OutputStream.html" title="class or interface in java.io"><B>OutputStream</B></A> (implements java.io.<A HREF="http://java.sun.com/j2se/1.5/docs/api/java/io/Closeable.html" title="class or interface in java.io">Closeable</A>, java.io.<A HREF="http://java.sun.com/j2se/1.5/docs/api/java/io/Flushable.html" title="class or 
interface in java.io">Flushable</A>) <UL> <LI TYPE="circle">java.io.<A HREF="http://java.sun.com/j2se/1.5/docs/api/java/io/FilterOutputStream.html" title="class or interface in java.io"><B>FilterOutputStream</B></A><UL> <LI TYPE="circle">java.io.<A HREF="http://java.sun.com/j2se/1.5/docs/api/java/io/DataOutputStream.html" title="class or interface in java.io"><B>DataOutputStream</B></A> (implements java.io.<A HREF="http://java.sun.com/j2se/1.5/docs/api/java/io/DataOutput.html" title="class or interface in java.io">DataOutput</A>) <UL> <LI TYPE="circle">org.apache.hadoop.io.<A HREF="../../../../org/apache/hadoop/io/DataOutputBuffer.html" title="class in org.apache.hadoop.io"><B>DataOutputBuffer</B></A></UL> </UL> </UL> <LI TYPE="circle">org.apache.hadoop.io.<A HREF="../../../../org/apache/hadoop/io/SequenceFile.html" title="class in org.apache.hadoop.io"><B>SequenceFile</B></A><LI TYPE="circle">org.apache.hadoop.io.<A HREF="../../../../org/apache/hadoop/io/SequenceFile.Reader.html" title="class in org.apache.hadoop.io"><B>SequenceFile.Reader</B></A><LI TYPE="circle">org.apache.hadoop.io.<A HREF="../../../../org/apache/hadoop/io/SequenceFile.Sorter.html" title="class in org.apache.hadoop.io"><B>SequenceFile.Sorter</B></A><LI TYPE="circle">org.apache.hadoop.io.<A HREF="../../../../org/apache/hadoop/io/SequenceFile.Sorter.SegmentDescriptor.html" title="class in org.apache.hadoop.io"><B>SequenceFile.Sorter.SegmentDescriptor</B></A> (implements java.lang.<A HREF="http://java.sun.com/j2se/1.5/docs/api/java/lang/Comparable.html" title="class or interface in java.lang">Comparable</A>&lt;T&gt;) <LI TYPE="circle">org.apache.hadoop.io.<A HREF="../../../../org/apache/hadoop/io/SequenceFile.Writer.html" title="class in org.apache.hadoop.io"><B>SequenceFile.Writer</B></A><LI TYPE="circle">org.apache.hadoop.io.<A HREF="../../../../org/apache/hadoop/io/Text.html" title="class in org.apache.hadoop.io"><B>Text</B></A> (implements org.apache.hadoop.io.<A 
HREF="../../../../org/apache/hadoop/io/WritableComparable.html" title="interface in org.apache.hadoop.io">WritableComparable</A>) <LI TYPE="circle">java.lang.<A HREF="http://java.sun.com/j2se/1.5/docs/api/java/lang/Throwable.html" title="class or interface in java.lang"><B>Throwable</B></A> (implements java.io.<A HREF="http://java.sun.com/j2se/1.5/docs/api/java/io/Serializable.html" title="class or interface in java.io">Serializable</A>) <UL> <LI TYPE="circle">java.lang.<A HREF="http://java.sun.com/j2se/1.5/docs/api/java/lang/Exception.html" title="class or interface in java.lang"><B>Exception</B></A><UL> <LI TYPE="circle">java.io.<A HREF="http://java.sun.com/j2se/1.5/docs/api/java/io/IOException.html" title="class or interface in java.io"><B>IOException</B></A><UL> <LI TYPE="circle">org.apache.hadoop.io.<A HREF="../../../../org/apache/hadoop/io/VersionMismatchException.html" title="class in org.apache.hadoop.io"><B>VersionMismatchException</B></A></UL> </UL> </UL> <LI TYPE="circle">org.apache.hadoop.io.<A HREF="../../../../org/apache/hadoop/io/TwoDArrayWritable.html" title="class in org.apache.hadoop.io"><B>TwoDArrayWritable</B></A> (implements org.apache.hadoop.io.<A HREF="../../../../org/apache/hadoop/io/Writable.html" title="interface in org.apache.hadoop.io">Writable</A>) <LI TYPE="circle">org.apache.hadoop.io.<A HREF="../../../../org/apache/hadoop/io/UTF8.html" title="class in org.apache.hadoop.io"><B>UTF8</B></A> (implements org.apache.hadoop.io.<A HREF="../../../../org/apache/hadoop/io/WritableComparable.html" title="interface in org.apache.hadoop.io">WritableComparable</A>) <LI TYPE="circle">org.apache.hadoop.io.<A HREF="../../../../org/apache/hadoop/io/VersionedWritable.html" title="class in org.apache.hadoop.io"><B>VersionedWritable</B></A> (implements org.apache.hadoop.io.<A HREF="../../../../org/apache/hadoop/io/Writable.html" title="interface in org.apache.hadoop.io">Writable</A>) <LI TYPE="circle">org.apache.hadoop.io.<A 
HREF="../../../../org/apache/hadoop/io/VIntWritable.html" title="class in org.apache.hadoop.io"><B>VIntWritable</B></A> (implements org.apache.hadoop.io.<A HREF="../../../../org/apache/hadoop/io/WritableComparable.html" title="interface in org.apache.hadoop.io">WritableComparable</A>) <LI TYPE="circle">org.apache.hadoop.io.<A HREF="../../../../org/apache/hadoop/io/VLongWritable.html" title="class in org.apache.hadoop.io"><B>VLongWritable</B></A> (implements org.apache.hadoop.io.<A HREF="../../../../org/apache/hadoop/io/WritableComparable.html" title="interface in org.apache.hadoop.io">WritableComparable</A>) <LI TYPE="circle">org.apache.hadoop.io.<A HREF="../../../../org/apache/hadoop/io/WritableComparator.html" title="class in org.apache.hadoop.io"><B>WritableComparator</B></A> (implements java.util.<A HREF="http://java.sun.com/j2se/1.5/docs/api/java/util/Comparator.html" title="class or interface in java.util">Comparator</A>&lt;T&gt;) <UL> <LI TYPE="circle">org.apache.hadoop.io.<A HREF="../../../../org/apache/hadoop/io/BooleanWritable.Comparator.html" title="class in org.apache.hadoop.io"><B>BooleanWritable.Comparator</B></A><LI TYPE="circle">org.apache.hadoop.io.<A HREF="../../../../org/apache/hadoop/io/BytesWritable.Comparator.html" title="class in org.apache.hadoop.io"><B>BytesWritable.Comparator</B></A><LI TYPE="circle">org.apache.hadoop.io.<A HREF="../../../../org/apache/hadoop/io/FloatWritable.Comparator.html" title="class in org.apache.hadoop.io"><B>FloatWritable.Comparator</B></A><LI TYPE="circle">org.apache.hadoop.io.<A HREF="../../../../org/apache/hadoop/io/IntWritable.Comparator.html" title="class in org.apache.hadoop.io"><B>IntWritable.Comparator</B></A><LI TYPE="circle">org.apache.hadoop.io.<A HREF="../../../../org/apache/hadoop/io/LongWritable.Comparator.html" title="class in org.apache.hadoop.io"><B>LongWritable.Comparator</B></A><UL> <LI TYPE="circle">org.apache.hadoop.io.<A 
HREF="../../../../org/apache/hadoop/io/LongWritable.DecreasingComparator.html" title="class in org.apache.hadoop.io"><B>LongWritable.DecreasingComparator</B></A></UL> <LI TYPE="circle">org.apache.hadoop.io.<A HREF="../../../../org/apache/hadoop/io/MD5Hash.Comparator.html" title="class in org.apache.hadoop.io"><B>MD5Hash.Comparator</B></A><LI TYPE="circle">org.apache.hadoop.io.<A HREF="../../../../org/apache/hadoop/io/Text.Comparator.html" title="class in org.apache.hadoop.io"><B>Text.Comparator</B></A><LI TYPE="circle">org.apache.hadoop.io.<A HREF="../../../../org/apache/hadoop/io/UTF8.Comparator.html" title="class in org.apache.hadoop.io"><B>UTF8.Comparator</B></A></UL> <LI TYPE="circle">org.apache.hadoop.io.<A HREF="../../../../org/apache/hadoop/io/WritableFactories.html" title="class in org.apache.hadoop.io"><B>WritableFactories</B></A><LI TYPE="circle">org.apache.hadoop.io.<A HREF="../../../../org/apache/hadoop/io/WritableName.html" title="class in org.apache.hadoop.io"><B>WritableName</B></A><LI TYPE="circle">org.apache.hadoop.io.<A HREF="../../../../org/apache/hadoop/io/WritableUtils.html" title="class in org.apache.hadoop.io"><B>WritableUtils</B></A></UL> </UL> <H2> Interface Hierarchy </H2> <UL> <LI TYPE="circle">org.apache.hadoop.io.<A HREF="../../../../org/apache/hadoop/io/Closeable.html" title="interface in org.apache.hadoop.io"><B>Closeable</B></A><LI TYPE="circle">java.lang.<A HREF="http://java.sun.com/j2se/1.5/docs/api/java/lang/Comparable.html" title="class or interface in java.lang"><B>Comparable</B></A>&lt;T&gt;<UL> <LI TYPE="circle">org.apache.hadoop.io.<A HREF="../../../../org/apache/hadoop/io/WritableComparable.html" title="interface in org.apache.hadoop.io"><B>WritableComparable</B></A> (also extends org.apache.hadoop.io.<A HREF="../../../../org/apache/hadoop/io/Writable.html" title="interface in org.apache.hadoop.io">Writable</A>) </UL> <LI TYPE="circle">org.apache.hadoop.io.<A 
HREF="../../../../org/apache/hadoop/io/SequenceFile.Sorter.RawKeyValueIterator.html" title="interface in org.apache.hadoop.io"><B>SequenceFile.Sorter.RawKeyValueIterator</B></A><LI TYPE="circle">org.apache.hadoop.io.<A HREF="../../../../org/apache/hadoop/io/SequenceFile.ValueBytes.html" title="interface in org.apache.hadoop.io"><B>SequenceFile.ValueBytes</B></A><LI TYPE="circle">org.apache.hadoop.io.<A HREF="../../../../org/apache/hadoop/io/Writable.html" title="interface in org.apache.hadoop.io"><B>Writable</B></A><UL> <LI TYPE="circle">org.apache.hadoop.io.<A HREF="../../../../org/apache/hadoop/io/WritableComparable.html" title="interface in org.apache.hadoop.io"><B>WritableComparable</B></A> (also extends java.lang.<A HREF="http://java.sun.com/j2se/1.5/docs/api/java/lang/Comparable.html" title="class or interface in java.lang">Comparable</A>&lt;T&gt;) </UL> <LI TYPE="circle">org.apache.hadoop.io.<A HREF="../../../../org/apache/hadoop/io/WritableFactory.html" title="interface in org.apache.hadoop.io"><B>WritableFactory</B></A></UL> <H2> Enum Hierarchy </H2> <UL> <LI TYPE="circle">java.lang.<A HREF="http://java.sun.com/j2se/1.5/docs/api/java/lang/Object.html" title="class or interface in java.lang"><B>Object</B></A><UL> <LI TYPE="circle">java.lang.<A HREF="http://java.sun.com/j2se/1.5/docs/api/java/lang/Enum.html" title="class or interface in java.lang"><B>Enum</B></A>&lt;E&gt; (implements java.lang.<A HREF="http://java.sun.com/j2se/1.5/docs/api/java/lang/Comparable.html" title="class or interface in java.lang">Comparable</A>&lt;T&gt;, java.io.<A HREF="http://java.sun.com/j2se/1.5/docs/api/java/io/Serializable.html" title="class or interface in java.io">Serializable</A>) <UL> <LI TYPE="circle">org.apache.hadoop.io.<A HREF="../../../../org/apache/hadoop/io/SequenceFile.CompressionType.html" title="enum in org.apache.hadoop.io"><B>SequenceFile.CompressionType</B></A></UL> </UL> </UL> <HR> <!-- ======= START OF BOTTOM NAVBAR ====== --> <A NAME="navbar_bottom"><!-- 
--></A> <A HREF="#skip-navbar_bottom" title="Skip navigation links"></A> <TABLE BORDER="0" WIDTH="100%" CELLPADDING="1" CELLSPACING="0" SUMMARY=""> <TR> <TD COLSPAN=2 BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A NAME="navbar_bottom_firstrow"><!-- --></A> <TABLE BORDER="0" CELLPADDING="0" CELLSPACING="3" SUMMARY=""> <TR ALIGN="center" VALIGN="top"> <TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../overview-summary.html"><FONT CLASS="NavBarFont1"><B>Overview</B></FONT></A>&nbsp;</TD> <TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="package-summary.html"><FONT CLASS="NavBarFont1"><B>Package</B></FONT></A>&nbsp;</TD> <TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <FONT CLASS="NavBarFont1">Class</FONT>&nbsp;</TD> <TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <FONT CLASS="NavBarFont1">Use</FONT>&nbsp;</TD> <TD BGCOLOR="#FFFFFF" CLASS="NavBarCell1Rev"> &nbsp;<FONT CLASS="NavBarFont1Rev"><B>Tree</B></FONT>&nbsp;</TD> <TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../deprecated-list.html"><FONT CLASS="NavBarFont1"><B>Deprecated</B></FONT></A>&nbsp;</TD> <TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../index-all.html"><FONT CLASS="NavBarFont1"><B>Index</B></FONT></A>&nbsp;</TD> <TD BGCOLOR="#EEEEFF" CLASS="NavBarCell1"> <A HREF="../../../../help-doc.html"><FONT CLASS="NavBarFont1"><B>Help</B></FONT></A>&nbsp;</TD> </TR> </TABLE> </TD> <TD ALIGN="right" VALIGN="top" ROWSPAN=3><EM> </EM> </TD> </TR> <TR> <TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2"> &nbsp;<A HREF="../../../../org/apache/hadoop/fs/s3/package-tree.html"><B>PREV</B></A>&nbsp; &nbsp;<A HREF="../../../../org/apache/hadoop/io/compress/package-tree.html"><B>NEXT</B></A></FONT></TD> <TD BGCOLOR="white" CLASS="NavBarCell2"><FONT SIZE="-2"> <A HREF="../../../../index.html?org/apache/hadoop/io/package-tree.html" target="_top"><B>FRAMES</B></A> &nbsp; &nbsp;<A HREF="package-tree.html" target="_top"><B>NO FRAMES</B></A> &nbsp; &nbsp;<SCRIPT type="text/javascript"> <!-- 
if(window==top) { document.writeln('<A HREF="../../../../allclasses-noframe.html"><B>All Classes</B></A>'); } //--> </SCRIPT> <NOSCRIPT> <A HREF="../../../../allclasses-noframe.html"><B>All Classes</B></A> </NOSCRIPT> </FONT></TD> </TR> </TABLE> <A NAME="skip-navbar_bottom"></A> <!-- ======== END OF BOTTOM NAVBAR ======= --> <HR> Copyright &copy; 2006 The Apache Software Foundation </BODY> </HTML>
apache-2.0
elveahuang/platform
platform-commons/platform-commons-storage/src/main/java/cn/elvea/platform/commons/storage/oss/OssStorageConfig.java
602
package cn.elvea.platform.commons.storage.oss;

import lombok.Data;

import java.io.Serializable;

/**
 * Configuration properties for the Aliyun OSS (Object Storage Service) storage backend.
 * <p>
 * Every field defaults to the empty string, so an unconfigured instance is always safe to
 * read. Getters, setters, {@code equals}, {@code hashCode} and {@code toString} are all
 * generated by Lombok's {@link Data}.
 *
 * @author elvea
 * @since 0.0.1
 */
@Data
public class OssStorageConfig implements Serializable {

    // Pin the serialized form explicitly. Without an explicit serialVersionUID the JVM
    // derives one from the class shape, and any later refactoring would silently break
    // deserialization of previously stored instances.
    private static final long serialVersionUID = 1L;

    /**
     * OSS service endpoint to connect to.
     */
    private String endpoint = "";

    /**
     * Access key id credential.
     */
    private String accessKeyId = "";

    /**
     * Access key secret credential.
     */
    private String accessKeySecret = "";

    /**
     * Name of the bucket objects are stored in.
     */
    private String bucketName = "";

    /**
     * Custom domain used when building public URLs (original doc: 自定义域名, "custom domain").
     */
    private String domain = "";
}
apache-2.0
evilfirefox/refuel2
src/VagueSoftware/Refuel2Bundle/Presenter/PresenterFactory.php
949
<?php

namespace VagueSoftware\Refuel2Bundle\Presenter;

use VagueSoftware\Refuel2Bundle\Exception\Presenter\PresenterNotFoundException;

/**
 * Registry of presenter instances, keyed by the class name they render.
 *
 * Class PresenterFactory
 * @package VagueSoftware\Refuel2Bundle\Presenter
 */
class PresenterFactory
{
    /**
     * Presenter instances indexed by target class name.
     *
     * @var array
     */
    private $registry = [];

    /**
     * Associates a presenter with the given class name.
     * Registering the same class twice silently replaces the earlier presenter.
     *
     * @param string $class fully-qualified class name the presenter handles
     * @param PresenterInterface $presenter
     * @return PresenterFactory this factory, for fluent chaining
     */
    public function registerPresenter(string $class, PresenterInterface $presenter): PresenterFactory
    {
        $this->registry[$class] = $presenter;

        return $this;
    }

    /**
     * Looks up the presenter registered for the given class name.
     *
     * @param string $class
     * @return PresenterInterface
     * @throws PresenterNotFoundException when no presenter was registered for $class
     */
    public function getPresenter(string $class): PresenterInterface
    {
        if (array_key_exists($class, $this->registry)) {
            return $this->registry[$class];
        }

        throw new PresenterNotFoundException($class);
    }
}
apache-2.0
ryan-nauman/Shrtn
Shrtn/Shrtn.cs
3095
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using Shrtn.Entity;
using Shrtn.Entity.Encoders;

namespace Shrtn
{
    /// <summary>
    /// Utility class that takes integers such as a primary key id and turns them into short strings using base conversion.
    /// </summary>
    public static class Shorten
    {
        /// <summary>
        /// Encode an integer using the default encoder
        /// </summary>
        /// <param name="value">Value to be encoded</param>
        /// <returns>An integer encoded to a string</returns>
        public static string Encode(ulong value)
        {
            // Delegate to the overload with the default (lower-case Crockford) encoder.
            return Encode(value, EncoderTypes.CrockfordLower);
        }

        /// <summary>
        /// Encode an integer and specify one of the builtin encoders
        /// </summary>
        /// <param name="value">Value to be encoded</param>
        /// <param name="encoderType">The encoder to be used</param>
        /// <returns>An integer encoded to a string</returns>
        public static string Encode(ulong value, EncoderTypes encoderType)
        {
            // Resolve the builtin encoder and apply it in one expression.
            return new EncoderFactory().GetEncoder(encoderType).Encode(value);
        }

        /// <summary>
        /// Encode an integer using a custom encoder
        /// </summary>
        /// <param name="value">Value to be encoded</param>
        /// <param name="encoder">The custom encoder to be used</param>
        /// <returns>An integer encoded to a string</returns>
        public static string Encode(ulong value, BaseEncoder encoder)
        {
            return encoder.Encode(value);
        }

        /// <summary>
        /// Decode a string using the default encoder
        /// </summary>
        /// <param name="encodedValue">The encoded string</param>
        /// <returns>A converted integer</returns>
        public static ulong Decode(string encodedValue)
        {
            // Delegate to the overload with the default (lower-case Crockford) encoder.
            return Decode(encodedValue, EncoderTypes.CrockfordLower);
        }

        /// <summary>
        /// Decode a string and specify one of the builtin encoders
        /// </summary>
        /// <param name="encodedValue">The encoded string</param>
        /// <param name="encoderType">The encoder used on this string</param>
        /// <returns>A converted integer</returns>
        public static ulong Decode(string encodedValue, EncoderTypes encoderType)
        {
            // Resolve the builtin encoder and apply it in one expression.
            return new EncoderFactory().GetEncoder(encoderType).Decode(encodedValue);
        }

        /// <summary>
        /// Decode a string using a custom encoder
        /// </summary>
        /// <param name="encodedValue">The encoded string</param>
        /// <param name="encoder">The custom encoder to be used</param>
        /// <returns>A converted integer</returns>
        public static ulong Decode(string encodedValue, BaseEncoder encoder)
        {
            return encoder.Decode(encodedValue);
        }
    }
}
apache-2.0
onhate/schemorger
src/main/java/org/schema/MusicAlbum.java
525
package org.schema;

/**
 * A collection of music tracks.
 *
 * @fullPath Thing > CreativeWork > MusicPlaylist > MusicAlbum
 *
 * @author Texelz (by Onhate)
 *
 */
public class MusicAlbum extends MusicPlaylist {

	// Performer of the album; exposed through the bean accessors below.
	private MusicGroup byArtist;

	/**
	 * The artist that performed this album or recording.
	 */
	public MusicGroup getByArtist() {
		return byArtist;
	}

	/**
	 * The artist that performed this album or recording.
	 */
	public void setByArtist(MusicGroup byArtist) {
		this.byArtist = byArtist;
	}
}
apache-2.0
project-oak/oak
third_party/expect/src/lib.rs
2176
// Simple equality test macro that returns an Err value on mismatch. // Adapted from standard library assert_eq! macro. #[macro_export] macro_rules! expect_eq { ($left:expr, $right:expr) => {{ match (&$left, &$right) { (left_val, right_val) => { if !(*left_val == *right_val) { return ::std::result::Result::Err(::std::boxed::Box::new( ::std::io::Error::new( ::std::io::ErrorKind::Other, ::std::format!( r#"{}:{}: expectation failed: `(left == right)` left: `{:?}`, right: `{:?}`"#, file!(), line!(), &*left_val, &*right_val ), ), )); } } } }}; } // Check if an expression matches a pattern, and generate an Err value if not. // Adapted from https://github.com/murarth/assert_matches. #[macro_export] macro_rules! expect_matches { ( $e:expr , $pat:pat ) => { match $e { $pat => (), ref e => { return ::std::result::Result::Err(::std::boxed::Box::new(::std::io::Error::new( ::std::io::ErrorKind::Other, ::std::format!( "{}:{}: expectation failed: `{:?}` does not match `{}`", file!(), line!(), e, stringify!($pat) ), ))); } } }; } #[macro_export] macro_rules! expect { ( $e:expr ) => { if !$e { return ::std::result::Result::Err(::std::boxed::Box::new(::std::io::Error::new( ::std::io::ErrorKind::Other, ::std::format!( "{}:{}: expectation failed: {:?} is false", file!(), line!(), stringify!($e) ), ))); } }; }
apache-2.0
Claudio1986/Punto_control
src/cl/puntocontrol/struts/action/TransportistasBuscarAction.java
1697
package cl.puntocontrol.struts.action; import java.util.ArrayList; import java.util.List; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; import org.apache.struts.action.Action; import org.apache.struts.action.ActionForm; import org.apache.struts.action.ActionForward; import org.apache.struts.action.ActionMapping; import cl.puntocontrol.hibernate.dao.DAOTransportista; import cl.puntocontrol.hibernate.domain.Transportista; import cl.puntocontrol.hibernate.domain.Usuario; import cl.puntocontrol.struts.form.TransportistasForm; public class TransportistasBuscarAction extends Action { @Override public ActionForward execute(ActionMapping mapping, ActionForm _form, HttpServletRequest request, HttpServletResponse response) throws Exception { TransportistasForm form = (TransportistasForm)_form; try{ List<Transportista> transportistas = new ArrayList<Transportista>(); transportistas=DAOTransportista.list("",""); form.setTransportistas(transportistas); form.setEstado(0); form.setNombre_transportista(""); form.setRut_transportista(""); form.setSap_transportista(""); String userName = (String)request.getSession().getAttribute("userName"); String password = (String)request.getSession().getAttribute("password"); Usuario usuario = UsuarioUtil.checkUser(userName, password); form.setUsuario(usuario); form.setSuccessMessage(""); return mapping.findForward("success"); }catch(Exception ex){ form.setErrorMessage("Ha Ocurrido Un Error Inesperado."); return mapping.findForward("error"); }finally{ } } }
apache-2.0
deeplearning4j/deeplearning4j
nd4j/nd4j-tensorflow/src/main/java/org/nd4j/tensorflow/conversion/TensorflowConversion.java
16741
/*******************************************************************************
 * Copyright (c) 2015-2018 Skymind, Inc.
 *
 * This program and the accompanying materials are made available under the
 * terms of the Apache License, Version 2.0 which is available at
 * https://www.apache.org/licenses/LICENSE-2.0.
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations
 * under the License.
 *
 * SPDX-License-Identifier: Apache-2.0
 ******************************************************************************/

package org.nd4j.tensorflow.conversion;

import org.nd4j.shade.protobuf.InvalidProtocolBufferException;
import org.bytedeco.javacpp.*;
import org.bytedeco.javacpp.indexer.*;
import org.nd4j.linalg.api.buffer.DataBuffer;
import org.nd4j.linalg.api.buffer.DataType;
import org.nd4j.linalg.api.concurrency.AffinityManager;
import org.nd4j.linalg.api.ndarray.INDArray;
import org.nd4j.linalg.compression.CompressedDataBuffer;
import org.nd4j.linalg.compression.CompressionDescriptor;
import org.nd4j.linalg.factory.Nd4j;
import org.nd4j.common.util.ArrayUtil;
import org.nd4j.tensorflow.conversion.graphrunner.SavedModelConfig;
import org.tensorflow.framework.MetaGraphDef;
import org.tensorflow.framework.SignatureDef;
import org.tensorflow.framework.TensorInfo;

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.Map;

import org.bytedeco.tensorflow.*;
import static org.bytedeco.tensorflow.global.tensorflow.*;

/**
 * Interop between nd4j {@link INDArray}
 * and {@link TF_Tensor}
 *
 * @author Adam Gibson
 */
public class TensorflowConversion {

    //used for passing to tensorflow: this dummy de allocator
    //allows us to use nd4j buffers for memory management
    //rather than having them managed by tensorflow
    private static Deallocator_Pointer_long_Pointer calling;
    // Lazily created singleton; see getInstance(). NOTE(review): initialization is
    // not synchronized — presumably callers confine creation to one thread; confirm.
    private static TensorflowConversion INSTANCE;

    /**
     * Get a singleton instance
     * @return
     */
    public static TensorflowConversion getInstance() {
        if(INSTANCE == null)
            INSTANCE = new TensorflowConversion();
        return INSTANCE;
    }

    private TensorflowConversion() {
        // Install the no-op deallocator once so TensorFlow never frees nd4j-owned buffers.
        if(calling == null)
            calling = DummyDeAllocator.getInstance();
    }

    /**
     * Convert an {@link INDArray}
     * to a {@link TF_Tensor}
     * with zero copy.
     * Uses a direct pointer to the underlying ndarray's
     * data
     * @param ndArray the ndarray to use
     * @return the equivalent {@link TF_Tensor}
     */
    public TF_Tensor tensorFromNDArray(INDArray ndArray) {
        if(ndArray == null) {
            throw new IllegalArgumentException("NDArray must not be null!");
        }
        //we infer data type from the ndarray.databuffer()
        //for now we throw an exception
        if(ndArray.data() == null) {
            throw new IllegalArgumentException("Unable to infer data type from null databuffer");
        }

        // Zero-copy requires contiguous C-ordered data; views/F-order get duplicated first.
        if(ndArray.isView() || ndArray.ordering() != 'c') {
            ndArray = ndArray.dup('c');
        }

        long[] ndShape = ndArray.shape();
        long[] tfShape = new long[ndShape.length];
        System.arraycopy(ndShape, 0, tfShape, 0, ndShape.length);

        // Map the nd4j DataType onto the TF_* data-type constant.
        int type;
        DataBuffer data = ndArray.data();
        DataType dataType = data.dataType();
        switch (dataType) {
            case DOUBLE: type = DT_DOUBLE; break;
            case FLOAT: type = DT_FLOAT; break;
            case INT: type = DT_INT32; break;
            case HALF: type = DT_HALF; break;
            case COMPRESSED:
                // For compressed buffers the effective element type comes from the
                // compression descriptor, not from the buffer's nominal type.
                CompressedDataBuffer compressedData = (CompressedDataBuffer)data;
                CompressionDescriptor desc = compressedData.getCompressionDescriptor();
                String algo = desc.getCompressionAlgorithm();
                switch (algo) {
                    case "FLOAT16": type = DT_HALF; break;
                    case "INT8": type = DT_INT8; break;
                    case "UINT8": type = DT_UINT8; break;
                    case "INT16": type = DT_INT16; break;
                    case "UINT16": type = DT_UINT16; break;
                    default: throw new IllegalArgumentException("Unsupported compression algorithm: " + algo);
                }
                break;
            case SHORT: type = DT_INT16; break;
            case LONG: type = DT_INT64; break;
            case UTF8: type = DT_STRING; break;
            case BYTE: type = DT_INT8; break;
            case UBYTE: type = DT_UINT8; break;
            case UINT16: type = DT_UINT16; break;
            case UINT32: type = DT_UINT32; break;
            case UINT64: type = DT_UINT64; break;
            case BFLOAT16: type = DT_BFLOAT16; break;
            case BOOL: type = DT_BOOL; break;
            default: throw new IllegalArgumentException("Unsupported data type: " + dataType);
        }

        try {
            Nd4j.getAffinityManager().ensureLocation(ndArray, AffinityManager.Location.HOST);
        } catch (Exception e) {
            // ND4J won't let us access compressed data in GPU memory, so we'll leave TensorFlow do the conversion instead
            ndArray.getDouble(0); // forces decompression and data copy to host
            data = ndArray.data();
            dataType = data.dataType();
            // Re-derive the TF type after decompression (smaller set of possible types here).
            switch (dataType) {
                case DOUBLE: type = DT_DOUBLE; break;
                case FLOAT: type = DT_FLOAT; break;
                case INT: type = DT_INT32; break;
                case LONG: type = DT_INT64; break;
                case UTF8: type = DT_STRING; break;
                default: throw new IllegalArgumentException("Unsupported data type: " + dataType);
            }
        }

        LongPointer longPointer = new LongPointer(tfShape);
        TF_Tensor tf_tensor = null;

        if (type == DT_STRING) {
            // DT_STRING layout (see the writes below): a table of 8-byte offsets, one per
            // element, followed by the TF_StringEncode'd bytes of every element.
            long size = 0;
            long length = ndArray.length();
            BytePointer[] strings = new BytePointer[(int)length];
            for (int i = 0; i < length; i++) {
                strings[i] = new BytePointer(ndArray.getString(i));
                size += TF_StringEncodedSize(strings[i].capacity());
            }
            tf_tensor = TF_AllocateTensor(
                    type,
                    longPointer,
                    tfShape.length,
                    8 * length + size);

            long offset = 0;
            BytePointer tf_data = new BytePointer(TF_TensorData(tf_tensor)).capacity(TF_TensorByteSize(tf_tensor));
            TF_Status status = TF_NewStatus();
            for (int i = 0; i < length; i++) {
                // Record where this element's encoded bytes start, then append them.
                tf_data.position(8 * i).putLong(offset);
                offset += TF_StringEncode(strings[i], strings[i].capacity() - 1, tf_data.position(8 * length + offset), tf_data.capacity() - tf_data.position(), status);
                if (TF_GetCode(status) != TF_OK) {
                    throw new IllegalStateException("ERROR: Unable to convert tensor " + TF_Message(status).getString());
                }
            }
            TF_DeleteStatus(status);
        } else {
            // Numeric case: wrap the nd4j buffer directly (zero copy); the dummy
            // deallocator ("calling") prevents TensorFlow from freeing nd4j's memory.
            tf_tensor = TF_NewTensor(
                    type,
                    longPointer,
                    tfShape.length,
                    data.pointer(),
                    data.length() * data.getElementSize(),
                    calling,null);
        }

        return tf_tensor;
    }

    /**
     * Convert a {@link INDArray}
     * to a {@link TF_Tensor}
     * using zero copy.
     * It will use the underlying
     * pointer with in nd4j.
     * @param tensor the tensor to use
     * @return
     */
    public INDArray ndArrayFromTensor(TF_Tensor tensor) {
        int rank = TF_NumDims(tensor);

        int[] ndShape;
        if (rank == 0) {
            // scalar
            ndShape = new int[] { 1 };
        } else {
            ndShape = new int[rank];
            for (int i = 0; i < ndShape.length; i++) {
                ndShape[i] = (int) TF_Dim(tensor,i);
            }
        }

        int tfType = TF_TensorType(tensor);
        DataType nd4jType = typeFor(tfType);

        int length = ArrayUtil.prod(ndShape);
        INDArray array;
        if (nd4jType == DataType.UTF8) {
            // Decode the DT_STRING layout: per-element 8-byte offsets followed by
            // TF_StringEncode'd payloads (mirror of tensorFromNDArray's encoding).
            String[] strings = new String[length];
            BytePointer data = new BytePointer(TF_TensorData(tensor)).capacity(TF_TensorByteSize(tensor));
            BytePointer str = new BytePointer((Pointer)null);
            SizeTPointer size = new SizeTPointer(1);
            TF_Status status = TF_NewStatus();
            for (int i = 0; i < length; i++) {
                long offset = data.position(8 * i).getLong();
                TF_StringDecode(data.position(8 * length + offset), data.capacity() - data.position(), str, size, status);
                if (TF_GetCode(status) != TF_OK) {
                    throw new IllegalStateException("ERROR: Unable to convert tensor " + TF_Message(status).getString());
                }
                strings[i] = str.position(0).capacity(size.get()).getString();
            }
            TF_DeleteStatus(status);
            array = Nd4j.create(strings);
        } else {
            // Numeric case: wrap TensorFlow's buffer via a typed indexer (no copy here).
            Pointer pointer = TF_TensorData(tensor).capacity(length);
            Indexer indexer = indexerForType(nd4jType,pointer);
            DataBuffer d = Nd4j.createBuffer(indexer.pointer(),nd4jType,length,indexer);
            array = Nd4j.create(d,ndShape);
        }
        // we don't need this in this case. Device memory will be updated right in the constructor
        //Nd4j.getAffinityManager().tagLocation(array, AffinityManager.Location.HOST);
        return array;
    }

    // Builds the JavaCPP indexer that views "pointer" as the given nd4j data type.
    private Indexer indexerForType(DataType type,Pointer pointer) {
        switch(type) {
            case DOUBLE: return DoubleIndexer.create(new DoublePointer(pointer));
            case FLOAT: return FloatIndexer.create(new FloatPointer(pointer));
            case INT: return IntIndexer.create(new IntPointer(pointer));
            case LONG: return LongIndexer.create(new LongPointer(pointer));
            case SHORT: return ShortIndexer.create(new ShortPointer(pointer));
            case BYTE: return ByteIndexer.create(new BytePointer(pointer));
            case UBYTE: return UByteIndexer.create(new BytePointer(pointer));
            case UINT16: return UShortIndexer.create(new ShortPointer(pointer));
            case UINT32: return UIntIndexer.create(new IntPointer(pointer));
            case UINT64: return ULongIndexer.create(new LongPointer(pointer));
            case BFLOAT16: return Bfloat16Indexer.create(new ShortPointer(pointer));
            case HALF: return HalfIndexer.create(new ShortPointer(pointer));
            case BOOL: return BooleanIndexer.create(new BooleanPointer(pointer));
            default: throw new IllegalArgumentException("Illegal type " + type);
        }
    }

    // Inverse of the type mapping in tensorFromNDArray: TF_* constant -> nd4j DataType.
    private DataType typeFor(int tensorflowType) {
        switch(tensorflowType) {
            case DT_DOUBLE: return DataType.DOUBLE;
            case DT_FLOAT: return DataType.FLOAT;
            case DT_HALF: return DataType.HALF;
            case DT_INT16: return DataType.SHORT;
            case DT_INT32: return DataType.INT;
            case DT_INT64: return DataType.LONG;
            case DT_STRING: return DataType.UTF8;
            case DT_INT8: return DataType.BYTE;
            case DT_UINT8: return DataType.UBYTE;
            case DT_UINT16: return DataType.UINT16;
            case DT_UINT32: return DataType.UINT32;
            case DT_UINT64: return DataType.UINT64;
            case DT_BFLOAT16: return DataType.BFLOAT16;
            case DT_BOOL: return DataType.BOOL;
            default: throw new IllegalArgumentException("Illegal type " + tensorflowType);
        }
    }

    /**
     * Get an initialized {@link TF_Graph}
     * based on the passed in file
     * (the file must be a binary protobuf/pb file)
     * The graph will be modified to be associated
     * with the device associated with this current thread.
     *
     * Depending on the active {@link Nd4j#getBackend()}
     * the device will either be the gpu pinned to the current thread
     * or the cpu
     * @param filePath the path to the file to read
     * @return the initialized graph
     * @throws IOException
     */
    public TF_Graph loadGraph(String filePath, TF_Status status) throws IOException {
        byte[] bytes = Files.readAllBytes(Paths.get(filePath));
        return loadGraph(bytes, status);
    }

    /**
     * Infers the device for the given thread
     * based on the {@link Nd4j#getAffinityManager()}
     * Usually, this will either be a gpu or cpu
     * reserved for the current device.
     * You can think of the "current thread"
     * as a worker. This is mainly useful with multiple gpus
     * @return
     */
    public static String defaultDeviceForThread() {
        Integer deviceForThread = Nd4j.getAffinityManager().getDeviceForCurrentThread();

        String deviceName = null;
        //gpu
        if(Nd4j.getBackend().getClass().getName().contains("JCublasBackend")) {
            deviceName = "/device:gpu:" + deviceForThread;
        }
        else {
            deviceName = "/device:cpu:" + deviceForThread;
        }

        return deviceName;
    }

    /**
     * Get an initialized {@link TF_Graph}
     * based on the passed in byte array content
     * (the content must be a binary protobuf/pb file)
     * The graph will be modified to be associated
     * with the device associated with this current thread.
     *
     * Depending on the active {@link Nd4j#getBackend()}
     * the device will either be the gpu pinned to the current thread
     * or the content
     * @param content the path to the file to read
     * @return the initialized graph
     * @throws IOException
     */
    public TF_Graph loadGraph(byte[] content, TF_Status status) {
        byte[] toLoad = content;
        TF_Buffer graph_def = TF_NewBufferFromString(new BytePointer(toLoad), content.length);
        TF_Graph graphC = TF_NewGraph();
        TF_ImportGraphDefOptions opts = TF_NewImportGraphDefOptions();
        TF_GraphImportGraphDef(graphC, graph_def, opts, status);
        if (TF_GetCode(status) != TF_OK) {
            throw new IllegalStateException("ERROR: Unable to import graph " + TF_Message(status).getString());
        }
        TF_DeleteImportGraphDefOptions(opts);
        return graphC;
    }

    /**
     * Load a session based on the saved model
     * @param savedModelConfig the configuration for the saved model
     * @param options the session options to use
     * @param runOptions the run configuration to use
     * @param graph the tf graph to use
     * @param inputsMap the input map
     * @param outputsMap the output names
     * @param status the status object to use for verifying the results
     * @return
     */
    public TF_Session loadSavedModel(SavedModelConfig savedModelConfig, TF_SessionOptions options, TF_Buffer runOptions, TF_Graph graph, Map<String, String> inputsMap, Map<String, String> outputsMap, TF_Status status) {
        TF_Buffer metaGraph = TF_Buffer.newBuffer();
        TF_Session session = TF_LoadSessionFromSavedModel(options, runOptions, new BytePointer(savedModelConfig.getSavedModelPath()), new BytePointer(savedModelConfig.getModelTag()), 1, graph, metaGraph, status);
        if (TF_GetCode(status) != TF_OK) {
            throw new IllegalStateException("ERROR: Unable to import model " + TF_Message(status).getString());
        }

        // Parse the MetaGraphDef returned by the native call so we can pull the
        // signature's logical input/output names out of it.
        MetaGraphDef metaGraphDef;
        try {
            metaGraphDef = MetaGraphDef.parseFrom(metaGraph.data().capacity(metaGraph.length()).asByteBuffer());
        } catch (InvalidProtocolBufferException ex) {
            throw new IllegalStateException("ERROR: Unable to import model " + ex);
        }
        Map<String, SignatureDef> signatureDefMap = metaGraphDef.getSignatureDefMap();
        SignatureDef signatureDef = signatureDefMap.get(savedModelConfig.getSignatureKey());
        // Populate the caller-supplied maps: signature key -> graph tensor name.
        Map<String, TensorInfo> inputs = signatureDef.getInputsMap();
        for (Map.Entry<String, TensorInfo> e : inputs.entrySet()) {
            inputsMap.put(e.getKey(), e.getValue().getName());
        }
        Map<String, TensorInfo> outputs = signatureDef.getOutputsMap();
        for (Map.Entry<String, TensorInfo> e : outputs.entrySet()) {
            outputsMap.put(e.getKey(), e.getValue().getName());
        }
        return session;
    }
}
apache-2.0
kailas96/kailas96.github.io
header_footer.html
3424
<!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8"> <meta http-equiv="X-UA-Compatible" content="IE=edge"> <meta name="keywords" content="Official website of Kumaranalloor Sree Nagaraja Temple, Sree Nagaraja Temple, Kumaranallor, Vishwakarmasamajam ,Tamil Vishwakarmasamajam No:54"> <meta name="description" content="Official Website of Sree Nagaraja Temple, Kumaranallor, Kottayam, Kerala."> <meta name="viewport" content="width=device-width, initial-scale=1.0"> <link rel="shortcut icon" type="image/png" href="favicon.png"/> <link rel="icon" type="image/png" href="favicon.png"/> <title>Official website of Kumaranalloor Sree Nagaraja Temple</title> <!-- ******************************** CSS ******************************** --> <!-- Latest compiled and minified CSS --> <link rel="stylesheet" href="//maxcdn.bootstrapcdn.com/bootstrap/3.3.4/css/bootstrap.min.css"> <link href="css/custom.css" rel="stylesheet" type="text/css"> <link href="css/pace-preloder.css" rel="stylesheet" type="text/css"> <link href="http://maxcdn.bootstrapcdn.com/font-awesome/4.1.0/css/font-awesome.min.css" rel="stylesheet"> <link rel="stylesheet" href="css/animate.min.css" type="text/css"> </head> <body> <!-- ******************************** Navigation ******************************** --> <nav class="navbar navbar-custom navbar-fixed-top navigation" id="nav" role="navigation"> <!-- Brand and toggle get grouped for better mobile display --> <div class="navbar-header"> <button type="button" class="navbar-toggle" id="navbar-toggle" data-toggle="collapse" data-target="#menu"> <i class="fa fa-bars"></i> </button> <a class="navbar-brand" style="color:orange" href="index.html"> <div class="brand"><img class="brand_logo" alt="" src="Resource/Common/sitelogo.png"/>Sree Nagaraja Temple </div> </a> </div> <!-- Collect the nav links, forms, and other content for toggling --> <div class="collapse navbar-collapse" id="menu"> <ul class="nav navbar-nav navbar-center"> <li class="active"><a 
href="index.html">Home</a> </li> <li><a href="aboutfestival.html">About</a> </li> <li><a href="gallery.html">Gallery</a> </li> <li><a href="contactus.html">Contact</a> </li> </ul> </div> <!-- .Navbar-collapse --> <!-- container- --> </nav> <!-- ******************************** Footer ******************************** --> <footer> <div class="footerData"> <p> ©2017 Kumaranalloor Sree Nagaraja Temple. All rights reserved. <br> Developed by <a href="mailto:[email protected]" target="_blank">Vishnu Kailas</a> </p> </div> </footer> <!-- ******************************** Scripts /******************************** --> <script src="https://ajax.googleapis.com/ajax/libs/jquery/1.11.2/jquery.min.js"></script> <script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.4/js/bootstrap.min.js"></script> <script type="text/javascript" src="js/common.js"></script> <script type="text/javascript" src="js/pace.min.js"></script> <script type="text/javascript" src="js/wow.min.js"></script> <script> new WOW().init(); </script> </body> </html>
apache-2.0
yipen9/spatial4j
src/main/java/com/spatial4j/core/shape/jts/JtsGeometry.java
21456
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.spatial4j.core.shape.jts; import com.spatial4j.core.context.SpatialContext; import com.spatial4j.core.context.jts.JtsSpatialContext; import com.spatial4j.core.exception.InvalidShapeException; import com.spatial4j.core.shape.Circle; import com.spatial4j.core.shape.Point; import com.spatial4j.core.shape.Rectangle; import com.spatial4j.core.shape.Shape; import com.spatial4j.core.shape.SpatialRelation; import com.spatial4j.core.shape.impl.BufferedLineString; import com.spatial4j.core.shape.impl.PointImpl; import com.spatial4j.core.shape.impl.Range; import com.spatial4j.core.shape.impl.RectangleImpl; import com.vividsolutions.jts.geom.Coordinate; import com.vividsolutions.jts.geom.CoordinateSequence; import com.vividsolutions.jts.geom.CoordinateSequenceFilter; import com.vividsolutions.jts.geom.Envelope; import com.vividsolutions.jts.geom.Geometry; import com.vividsolutions.jts.geom.GeometryCollection; import com.vividsolutions.jts.geom.GeometryFilter; import com.vividsolutions.jts.geom.IntersectionMatrix; import com.vividsolutions.jts.geom.LineString; import com.vividsolutions.jts.geom.Lineal; import com.vividsolutions.jts.geom.LinearRing; import com.vividsolutions.jts.geom.Polygon; import 
com.vividsolutions.jts.geom.Puntal; import com.vividsolutions.jts.geom.prep.PreparedGeometry; import com.vividsolutions.jts.geom.prep.PreparedGeometryFactory; import com.vividsolutions.jts.operation.union.UnaryUnionOp; import com.vividsolutions.jts.operation.valid.IsValidOp; import java.util.ArrayList; import java.util.List; /** * Wraps a JTS {@link Geometry} (i.e. may be a polygon or basically anything). * JTS does a great deal of the hard work, but there is work here in handling * dateline wrap. */ public class JtsGeometry implements Shape { /** * System property boolean that can disable auto validation in an assert. */ public static final String SYSPROP_ASSERT_VALIDATE = "spatial4j.JtsGeometry.assertValidate"; private final Geometry geom;//cannot be a direct instance of GeometryCollection as it doesn't support relate() private final boolean hasArea; private final Rectangle bbox; protected final JtsSpatialContext ctx; protected PreparedGeometry preparedGeometry; protected boolean validated = false; public JtsGeometry(Geometry geom, JtsSpatialContext ctx, boolean dateline180Check, boolean allowMultiOverlap) { this.ctx = ctx; //GeometryCollection isn't supported in relate() if (geom.getClass().equals(GeometryCollection.class)) throw new IllegalArgumentException("JtsGeometry does not support GeometryCollection but does support its subclasses."); //NOTE: All this logic is fairly expensive. There are some short-circuit checks though. if (ctx.isGeo()) { //Unwraps the geometry across the dateline so it exceeds the standard geo bounds (-180 to +180). if (dateline180Check) unwrapDateline(geom);//potentially modifies geom //If given multiple overlapping polygons, fix it by union if (allowMultiOverlap) geom = unionGeometryCollection(geom);//returns same or new geom //Cuts an unwrapped geometry back into overlaid pages in the standard geo bounds. 
geom = cutUnwrappedGeomInto360(geom);//returns same or new geom
      assert geom.getEnvelopeInternal().getWidth() <= 360;
      assert !geom.getClass().equals(GeometryCollection.class) : "GeometryCollection unsupported";//double check

      //Compute bbox
      bbox = computeGeoBBox(geom);
    } else {//not geo
      //If given multiple overlapping polygons, fix it by union
      if (allowMultiOverlap)
        geom = unionGeometryCollection(geom);//returns same or new geom
      Envelope env = geom.getEnvelopeInternal();
      bbox = new RectangleImpl(env.getMinX(), env.getMaxX(), env.getMinY(), env.getMaxY(), ctx);
    }
    geom.getEnvelopeInternal();//ensure envelope is cached internally, which is lazy evaluated. Keeps this thread-safe.
    this.geom = geom;
    assert assertValidate();//kinda expensive but caches valid state

    this.hasArea = !((geom instanceof Lineal) || (geom instanceof Puntal));
  }

  /**
   * Called via assertion so that validation only runs when assertions are enabled.
   * Validation is skipped only if the {@code SYSPROP_ASSERT_VALIDATE} system property
   * is explicitly set to a non-true value. Always returns true so the enclosing
   * {@code assert} never fires on its own.
   */
  private boolean assertValidate() {
    String assertValidate = System.getProperty(SYSPROP_ASSERT_VALIDATE);
    if (assertValidate == null || Boolean.parseBoolean(assertValidate))
      validate();
    return true;
  }

  /**
   * Validates the shape, throwing a descriptive error if it isn't valid. Note that this
   * is usually called automatically by default, but that can be disabled.
   *
   * @throws InvalidShapeException with descriptive error if the shape isn't valid
   */
  public void validate() throws InvalidShapeException {
    if (!validated) {//validated flag caches a successful check so repeat calls are cheap
      IsValidOp isValidOp = new IsValidOp(geom);
      if (!isValidOp.isValid())
        throw new InvalidShapeException(isValidOp.getValidationError().toString());
      validated = true;
    }
  }

  /**
   * Adds an index to this class internally to compute spatial relations faster. In JTS this
   * is called a {@link com.vividsolutions.jts.geom.prep.PreparedGeometry}.  This
   * isn't done by default because it takes some time to do the optimization, and it uses more
   * memory.  Calling this method isn't thread-safe so be careful when this is done. If it was
   * already indexed then nothing happens.
   */
  public void index() {
    if (preparedGeometry == null)
      preparedGeometry = PreparedGeometryFactory.prepare(geom);
  }

  @Override
  public boolean isEmpty() {
    return geom.isEmpty();
  }

  /**
   * Given {@code geoms} which has already been checked for being in world
   * bounds, return the minimal longitude range of the bounding box.
   */
  protected Rectangle computeGeoBBox(Geometry geoms) {
    if (geoms.isEmpty())
      return new RectangleImpl(Double.NaN, Double.NaN, Double.NaN, Double.NaN, ctx);
    final Envelope env = geoms.getEnvelopeInternal();//for minY & maxY (simple)
    if (env.getWidth() > 180 && geoms.getNumGeometries() > 1) {
      // This is ShapeCollection's bbox algorithm: grow a longitude Range per
      // sub-geometry so a collection straddling the dateline gets the minimal wrap.
      Range xRange = null;
      for (int i = 0; i < geoms.getNumGeometries(); i++) {
        Envelope envI = geoms.getGeometryN(i).getEnvelopeInternal();
        Range xRange2 = new Range.LongitudeRange(envI.getMinX(), envI.getMaxX());
        if (xRange == null) {
          xRange = xRange2;
        } else {
          xRange = xRange.expandTo(xRange2);
        }
        if (xRange == Range.LongitudeRange.WORLD_180E180W)
          break; // can't grow any bigger
      }
      return new RectangleImpl(xRange.getMin(), xRange.getMax(), env.getMinY(), env.getMaxY(), ctx);
    } else {
      return new RectangleImpl(env.getMinX(), env.getMaxX(), env.getMinY(), env.getMaxY(), ctx);
    }
  }

  @Override
  public JtsGeometry getBuffered(double distance, SpatialContext ctx) {
    //TODO doesn't work correctly across the dateline. The buffering needs to happen
    // when it's transiently unrolled, prior to being sliced.
    return this.ctx.makeShape(geom.buffer(distance), true, true);
  }

  @Override
  public boolean hasArea() {
    return hasArea;
  }

  @Override
  public double getArea(SpatialContext ctx) {
    double geomArea = geom.getArea();
    if (ctx == null || geomArea == 0)
      return geomArea;
    //Use the area proportional to how filled the bbox is.
    double bboxArea = getBoundingBox().getArea(null);//plain 2d area
    assert bboxArea >= geomArea;
    double filledRatio = geomArea / bboxArea;
    return getBoundingBox().getArea(ctx) * filledRatio;
    // (Future: if we know we use an equal-area projection then we don't need to
    //  estimate)
  }

  @Override
  public Rectangle getBoundingBox() {
    return bbox;
  }

  @Override
  public JtsPoint getCenter() {
    if (isEmpty()) //geom.getCentroid == null
      return new JtsPoint(ctx.getGeometryFactory().createPoint((Coordinate) null), ctx);
    return new JtsPoint(geom.getCentroid(), ctx);
  }

  /** Dispatches to the type-specific relate overloads; falls back to the transposed relation. */
  @Override
  public SpatialRelation relate(Shape other) {
    if (other instanceof Point)
      return relate((Point) other);
    else if (other instanceof Rectangle)
      return relate((Rectangle) other);
    else if (other instanceof Circle)
      return relate((Circle) other);
    else if (other instanceof JtsGeometry)
      return relate((JtsGeometry) other);
    else if (other instanceof BufferedLineString)
      throw new UnsupportedOperationException("Can't use BufferedLineString with JtsGeometry");
    return other.relate(this).transpose();
  }

  /** Relates to a point, short-circuiting via the bounding box first. */
  public SpatialRelation relate(Point pt) {
    if (!getBoundingBox().relate(pt).intersects())
      return SpatialRelation.DISJOINT;
    Geometry ptGeom;
    if (pt instanceof JtsPoint)
      ptGeom = ((JtsPoint) pt).getGeom();
    else
      ptGeom = ctx.getGeometryFactory().createPoint(new Coordinate(pt.getX(), pt.getY()));
    return relate(ptGeom);//is point-optimized
  }

  /** Relates to a rectangle; a conclusive bbox-level WITHIN/DISJOINT avoids the JTS call. */
  public SpatialRelation relate(Rectangle rectangle) {
    SpatialRelation bboxR = bbox.relate(rectangle);
    if (bboxR == SpatialRelation.WITHIN || bboxR == SpatialRelation.DISJOINT)
      return bboxR;
    // FYI, the right answer could still be DISJOINT or WITHIN, but we don't know yet.
    return relate(ctx.getGeometryFrom(rectangle));
  }

  /**
   * Relates to a circle by testing each vertex of this geometry against the circle.
   * NOTE(review): vertex-only testing presumes edges don't dip in/out of the circle
   * between vertices — an approximation inherent to this approach.
   */
  public SpatialRelation relate(Circle circle) {
    SpatialRelation bboxR = bbox.relate(circle);
    if (bboxR == SpatialRelation.WITHIN || bboxR == SpatialRelation.DISJOINT)
      return bboxR;

    //Test each point to see how many of them are outside of the circle.
    //TODO consider instead using geom.apply(CoordinateSequenceFilter) -- maybe faster since avoids Coordinate[] allocation
    Coordinate[] coords = geom.getCoordinates();
    int outside = 0;
    int i = 0;
    for (Coordinate coord : coords) {
      i++;
      SpatialRelation sect = circle.relate(new PointImpl(coord.x, coord.y, ctx));
      if (sect == SpatialRelation.DISJOINT)
        outside++;
      if (i != outside && outside != 0)//short circuit: partially outside, partially inside
        return SpatialRelation.INTERSECTS;
    }
    if (i == outside) {//all vertices outside: either fully disjoint or we contain the circle
      return (relate(circle.getCenter()) == SpatialRelation.DISJOINT)
          ? SpatialRelation.DISJOINT : SpatialRelation.CONTAINS;
    }
    assert outside == 0;
    return SpatialRelation.WITHIN;
  }

  /** Relates to another JtsGeometry via the raw JTS geometry. */
  public SpatialRelation relate(JtsGeometry jtsGeometry) {
    //don't bother checking bbox since geom.relate() does this already
    return relate(jtsGeometry.geom);
  }

  /** Core JTS relate; uses the PreparedGeometry (if {@link #index()} was called) for speed. */
  protected SpatialRelation relate(Geometry oGeom) {
    //see http://docs.geotools.org/latest/userguide/library/jts/dim9.html#preparedgeometry
    if (oGeom instanceof com.vividsolutions.jts.geom.Point) {
      if (preparedGeometry != null)
        return preparedGeometry.disjoint(oGeom) ? SpatialRelation.DISJOINT : SpatialRelation.CONTAINS;
      return geom.disjoint(oGeom) ? SpatialRelation.DISJOINT : SpatialRelation.CONTAINS;
    }
    if (preparedGeometry == null)
      return intersectionMatrixToSpatialRelation(geom.relate(oGeom));
    else if (preparedGeometry.covers(oGeom))
      return SpatialRelation.CONTAINS;
    else if (preparedGeometry.coveredBy(oGeom))
      return SpatialRelation.WITHIN;
    else if (preparedGeometry.intersects(oGeom))
      return SpatialRelation.INTERSECTS;
    return SpatialRelation.DISJOINT;
  }

  /** Maps a JTS DE-9IM matrix onto Spatial4j's relation enum. */
  public static SpatialRelation intersectionMatrixToSpatialRelation(IntersectionMatrix matrix) {
    //As indicated in SpatialRelation javadocs, Spatial4j CONTAINS & WITHIN are
    // OGC's COVERS & COVEREDBY
    if (matrix.isCovers())
      return SpatialRelation.CONTAINS;
    else if (matrix.isCoveredBy())
      return SpatialRelation.WITHIN;
    else if (matrix.isDisjoint())
      return SpatialRelation.DISJOINT;
    return SpatialRelation.INTERSECTS;
  }

  @Override
  public String toString() {
    return geom.toString();
  }

  @Override
  public boolean equals(Object o) {
    if (this == o) return true;
    if (o == null || getClass() != o.getClass()) return false;
    JtsGeometry that = (JtsGeometry) o;
    return geom.equalsExact(that.geom);//fast equality for normalized geometries
  }

  @Override
  public int hashCode() {
    //FYI if geometry.equalsExact(that.geometry), then their envelopes are the same.
    return geom.getEnvelopeInternal().hashCode();
  }

  /** The underlying JTS geometry; callers must not mutate it. */
  public Geometry getGeom() {
    return geom;
  }

  /**
   * If <code>geom</code> spans the dateline, then this modifies it to be a
   * valid JTS geometry that extends to the right of the standard -180 to +180
   * width such that some points are greater than +180 but some remain less.
   * Takes care to invoke {@link com.vividsolutions.jts.geom.Geometry#geometryChanged()}
   * if needed.
   *
   * @return The number of times the geometry spans the dateline.  >= 0
   */
  private static int unwrapDateline(Geometry geom) {
    if (geom.getEnvelopeInternal().getWidth() < 180)
      return 0;//can't possibly cross the dateline
    final int[] crossings = {0};//an array so that an inner class can modify it.
    geom.apply(new GeometryFilter() {
      @Override
      public void filter(Geometry geom) {
        int cross = 0;
        if (geom instanceof LineString) {//note: LinearRing extends LineString
          if (geom.getEnvelopeInternal().getWidth() < 180)
            return;//can't possibly cross the dateline
          cross = unwrapDateline((LineString) geom);
        } else if (geom instanceof Polygon) {
          if (geom.getEnvelopeInternal().getWidth() < 180)
            return;//can't possibly cross the dateline
          cross = unwrapDateline((Polygon) geom);
        } else
          return;
        crossings[0] = Math.max(crossings[0], cross);
      }
    });//geom.apply()
    return crossings[0];
  }

  /** See {@link #unwrapDateline(Geometry)}. */
  private static int unwrapDateline(Polygon poly) {
    LineString exteriorRing = poly.getExteriorRing();
    int cross = unwrapDateline(exteriorRing);
    if (cross > 0) {
      //TODO TEST THIS! Maybe bug if doesn't cross but is in another page?
      //Shift each hole rightwards by 360 until it lies inside the unwrapped exterior.
      for (int i = 0; i < poly.getNumInteriorRing(); i++) {
        LineString innerLineString = poly.getInteriorRingN(i);
        unwrapDateline(innerLineString);
        for (int shiftCount = 0; !exteriorRing.contains(innerLineString); shiftCount++) {
          if (shiftCount > cross)
            throw new IllegalArgumentException("The inner ring doesn't appear to be within the exterior: "
                + exteriorRing + " inner: " + innerLineString);
          shiftGeomByX(innerLineString, 360);
        }
      }
      poly.geometryChanged();
    }
    return cross;
  }

  /** See {@link #unwrapDateline(Geometry)}. */
  private static int unwrapDateline(LineString lineString) {
    CoordinateSequence cseq = lineString.getCoordinateSequence();
    int size = cseq.size();
    if (size <= 1)
      return 0;

    int shiftX = 0;//invariant: == shiftXPage*360
    int shiftXPage = 0;
    int shiftXPageMin = 0/* <= 0 */, shiftXPageMax = 0; /* >= 0 */
    double prevX = cseq.getX(0);
    for (int i = 1; i < size; i++) {
      double thisX_orig = cseq.getX(i);
      assert thisX_orig >= -180 && thisX_orig <= 180 : "X not in geo bounds";
      double thisX = thisX_orig + shiftX;
      if (prevX - thisX > 180) {//cross dateline from left to right
        thisX += 360;
        shiftX += 360;
        shiftXPage += 1;
        shiftXPageMax = Math.max(shiftXPageMax, shiftXPage);
      } else if (thisX - prevX > 180) {//cross dateline from right to left
        thisX -= 360;
        shiftX -= 360;
        shiftXPage -= 1;
        shiftXPageMin = Math.min(shiftXPageMin, shiftXPage);
      }
      if (shiftXPage != 0)
        cseq.setOrdinate(i, CoordinateSequence.X, thisX);
      prevX = thisX;
    }
    if (lineString instanceof LinearRing) {
      assert cseq.getCoordinate(0).equals(cseq.getCoordinate(size - 1));
      assert shiftXPage == 0;//starts and ends at 0
    }
    assert shiftXPageMax >= 0 && shiftXPageMin <= 0;
    //Unfortunately we are shifting again; it'd be nice to be smarter and shift once
    shiftGeomByX(lineString, shiftXPageMin * -360);
    int crossings = shiftXPageMax - shiftXPageMin;
    if (crossings > 0)
      lineString.geometryChanged();
    return crossings;
  }

  /** Shifts every X ordinate of {@code geom} by {@code xShift} degrees, in place. */
  private static void shiftGeomByX(Geometry geom, final int xShift) {
    if (xShift == 0)
      return;
    geom.apply(new CoordinateSequenceFilter() {
      @Override
      public void filter(CoordinateSequence seq, int i) {
        seq.setOrdinate(i, CoordinateSequence.X, seq.getX(i) + xShift);
      }

      @Override
      public boolean isDone() {
        return false;
      }

      @Override
      public boolean isGeometryChanged() {
        return true;
      }
    });
  }

  /** Unions a GeometryCollection into a single geometry; other inputs are returned as-is. */
  private static Geometry unionGeometryCollection(Geometry geom) {
    if (geom instanceof GeometryCollection) {
      return geom.union();
    }
    return geom;
  }

  /**
   * This "pages" through standard geo boundaries offset by multiples of 360
   * longitudinally that intersect geom, and the intersecting results of a page
   * and the geom are shifted into the standard -180 to +180 and added to a new
   * geometry that is returned.
   */
  private static Geometry cutUnwrappedGeomInto360(Geometry geom) {
    Envelope geomEnv = geom.getEnvelopeInternal();
    if (geomEnv.getMinX() >= -180 && geomEnv.getMaxX() <= 180)
      return geom;
    assert geom.isValid() : "geom";

    //TODO opt: support geom's that start at negative pages --
    // ... will avoid need to previously shift in unwrapDateline(geom).
    List<Geometry> geomList = new ArrayList<Geometry>();
    //page 0 is the standard -180 to 180 range
    for (int page = 0; true; page++) {
      double minX = -180 + page * 360;
      if (geomEnv.getMaxX() <= minX)
        break;
      Geometry rect = geom.getFactory().toGeometry(new Envelope(minX, minX + 360, -90, 90));
      assert rect.isValid() : "rect";
      Geometry pageGeom = rect.intersection(geom);//JTS is doing some hard work
      assert pageGeom.isValid() : "pageGeom";
      shiftGeomByX(pageGeom, page * -360);
      geomList.add(pageGeom);
    }
    return UnaryUnionOp.union(geomList);
  }

//  private static Geometry removePolyHoles(Geometry geom) {
//    //TODO this does a deep copy of geom even if no changes needed; be smarter
//    GeometryTransformer gTrans = new GeometryTransformer() {
//      @Override
//      protected Geometry transformPolygon(Polygon geom, Geometry parent) {
//        if (geom.getNumInteriorRing() == 0)
//          return geom;
//        return factory.createPolygon((LinearRing) geom.getExteriorRing(),null);
//      }
//    };
//    return gTrans.transform(geom);
//  }
//
//  private static Geometry snapAndClean(Geometry geom) {
//    return new GeometrySnapper(geom).snapToSelf(GeometrySnapper.computeOverlaySnapTolerance(geom), true);
//  }
}
apache-2.0
jiangerji/my-utils
store.baidu.com/cache/html/2009.html
20437
<!DOCTYPE html> <html> <head> <meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1" /> <meta http-equiv=Content-Type content="text/html;charset=utf-8"> <meta property="wb:webmaster" content="e635a420782119b7" /> <meta name="keywords" content="可穿戴,可穿戴设备,百度可穿戴设备,百度可穿戴,智能设备,智能可穿戴设备,超智能设备,百度智能设备,便携设备,便携智能设备,百度便携设备, 人体设备,智能人体设备,百度人体设备,便携人体设备,dulife,dulife平台,奇酷网,奇酷,360奇酷,小米酷玩,小米酷玩频道,百度硬件,智能硬件,硬件,智能移动设备,智能移动硬件 移动设备,移动硬件,可穿戴硬件,点名时间,母亲节"> <meta name="description" content="百度未来商店作为行业内首个基于生活需求和场景的智能硬件信息互动平台,秉承“科技改变生活”的理念,通过智能硬件提升人们在运动、睡眠、生活、出行、娱乐等方面的品质,让用户更有效的关爱自身健康和家庭生活,让科技真正融入到老百姓的生活中。平台将围绕可穿戴、智能家居、安全出行等几大领域,提供最前沿、最时尚、最实用、最专业的硬件产品介绍、服务定制、最新资讯、圈子交流等服务,打造行业最专业的智能硬件信息互动平台。"> <!--[if lte IE 7]> <div class="goodbye-modal hide"></div> <div class="goodbye-ie hide" id="goodbyeIE"> <p>您的浏览器太旧啦~为了获得更好的体验,强烈建议您使用以下浏览器:</p> <ul class="browers clearfix"> <li class="chrome"> <a target="_blank" href="https://www.google.com/intl/en/chrome/browser/"></a> <span>chrome</span> </li> <li class="firefox"> <a target="_blank" href="http://www.firefox.com.cn/download/"></a> <span>firefox</span> </li> <li class="ie9"> <a target="_blank" href="http://windows.microsoft.com/zh-cn/internet-explorer/download-ie"></a> <span>IE9+</span> </li> </ul> <p class="no-tip"><a id="iknow" href="javascript:void(0);">知道啦</a></p> </div> <![endif]--> <script> void function(g,f,j,c,h,d,b){g.alogObjectName=h,g[h]=g[h]||function(){(g[h].q=g[h].q||[]).push(arguments)},g[h].l=g[h].l||+new Date,d=f.createElement(j),d.async=!0,d.src=c,b=f.getElementsByTagName(j)[0],b.parentNode.insertBefore(d,b)}(window,document,"script","http://img.baidu.com/hunter/alog/alog.min.js","alog");void function(){function c(){return;}window.PDC={mark:function(a,b){alog("speed.set",a,b||+new Date);alog.fire&&alog.fire("mark")},init:function(a){alog("speed.set","options",a)},view_start:c,tti:c,page_ready:c}}();void function(n){var o=!1;n.onerror=function(n,e,t,c){var i=!0;return!e&&/^script 
error/i.test(n)&&(o?i=!1:o=!0),i&&alog("exception.send","exception",{msg:n,js:e,ln:t,col:c}),!1},alog("exception.on","catch",function(n){alog("exception.send","exception",{msg:n.msg,js:n.path,ln:n.ln,method:n.method,flag:"catch"})})}(window); </script> <link rel="icon" href="/static/common/favicon.ico" type="image/x-icon"> <link rel="shorticon icon" href="/static/common/favicon.ico" type="image/x-icon"> <meta property="wb:webmaster" content="18de27f07b76316f" /> <meta name="baidu-tc-cerfication" content="a7fc1bfc58a8b04c738569e2d3c418dc" /> <meta name="baidu-site-verification" content="OZIMDr2iVS" /> <script type="text/javascript"> window.duin = window.duin || {}; duin.userinfo = { islogin:'0', displayname: "", role: "", avatar: "" //头像 }; window.login_wrapper_url = 'http://passport.baidu.com/passApi/js/uni_login_wrapper.js?cdnversion=201411181725'; var _hmt = _hmt || []; _hmt.push(['_setAccount', '3d1ae725881ab60e9d4d62efe134498c']); </script> <title>百度未来商店-OutRunner:奔跑的遥控小巨人来了</title> <link rel="stylesheet" type="text/css" href="/static/common/pkg/common_aa2cbaf.css"/><link rel="stylesheet" type="text/css" href="/static/discovery/pkg/single-artical_09c7d24.css"/></head> <body> <script> alog('speed.set', 'ht', +new Date); </script> <script>with(document)0[(getElementsByTagName('head')[0]||body).appendChild(createElement('script')).src='http://img.baidu.com/hunter/kaifangyun.js?st='+~(new Date()/864e5)];</script> <script> window.hostType = "news"; window.hostId = "2009"; </script> <div class="nav" > <div class="nav-header clearfix"> <a href="/" class="fl-l nav-logo"></a> <div class="nav-right clearfix"> <div class="nav-info"> <span class="nav-not-login"><a href="#" class="nav-login">登录</a></span></div> <ul class="nav-list clearfix"> <li class="nav-home-item"><a href="/" class="">首页</a></li> <li class="nav-store-wrapper"><div class="nav-store">全部设备</div> <ul class="nav-store-list"> <li><a href="/sports">运动</a></li> <li><a href="/parlor">客厅</a></li> <li><a 
href="/sleep">睡眠</a></li> </ul> </li> <li class="nav-hot-item"><a class=" " href="/tryout/list">试用</a><span class="nav-hot"></span></li> <li><a class="" href="/discovery-news">发现</a></li> <li class="nav-last-li"><a class="" href="/product/ushome">合作</a></li> </ul> <div class="nav-search "> <form action="/search/index" id="searchForm" method="POST"> <input type="hidden" name="pn" value="1"> <input type="hidden" name="limit" value="12"> <input type="text" class="nav-search-input" autocomplete="off" name="scontent" value=""> <span class="nav-search-icon"></span> </form> </div> </div> </div> </div> <div class="d-main"> <div class="bread-path"> <a href="/">首页</a><span class="icon-path">&gt;</span><a href="/discovery-news">发现</a><span class="icon-path">&gt;</span><span>OutRunner:奔跑的遥控小巨人来了</span> </div> <div class="d-out-ad" id="friend"> <ul class="d-out-ad-wrapper"> <li><a href="http://store.baidu.com/trysome/view/2.html" target="_blank"><img src="http://bs.baidu.com/dulife/54698d532f196.jpg" height="100" width="960" title="未来试用第二期"></a></li> </ul> </div> <div class="d-content clearfix"> <div class="d-content-sub"> <div class="d-hot-recommend"> <div class="d-h-r-head"> <a href="javascript:void(0);" class="d-h-normal d-h-active">热门资讯</a> <span class="d-h-split"></span> <a href="javascript:void(0);" class="d-h-normal">小编推荐</a></div> <div class="d-h-r-content"> <div class="d-h-r-c-wrapper" id="hotEvaluation"> </div> <div class="d-h-r-c-wrapper" id="editorRecommend"> </div> </div> </div> <div class="d-best-recommend" id='bestRecommendWrapper'> </div> <div class="d-guess-like"> <div class="d-g-l-header clearfix"> <a href="javascript:;" class="d-g-l-change">换一批</a> <div class="d-g-l-title">猜你喜欢</div> </div> <div class="d-g-l-content" id="uLike"> </div> </div> </div> <div class="d-content-main"> <div class="d-artical-title">OutRunner:奔跑的遥控小巨人来了</div> <div class="d-a-info clearfix"> <div class="d-a-operation"><a href="#comment" class="d-a-o-comment">评论(<span 
id="commentCount">0</span>)</a><a href="#dLikeHash" class="d-a-o-like">喜欢(<span id="likeCount">19</span>)</a><span class="d-share clearfix"><span class="bdsharebuttonbox"><a href="#" class="bds_more" data-cmd="more">分享<span class="d-down-icon"></span></a></span></span></div> <div class="d-a-from"> <span class="d-a-f-name">来源:雷锋网 http://www.leiphone.com/outrunner-running-robot.html</span> <span class="d-public-time" id="modifyTime"></span> </div> </div> <div class="d-summary">你已经有了小型遥控汽车、遥控飞机、遥控船和遥控潜水艇,嗯,还有什么可以遥控呢?遥控一些能跑的东西怎么样?这就是OutRunner现在在干的事。</div> <div class="d-artical-content" id="sourceContent"> 你已经有了小型遥控汽车、遥控飞机、遥控船和遥控潜水艇,嗯,还有什么可以遥控呢?<a href="http://www.leiphone.com/wrtnode-machine-openhardware.html" target="_blank">遥控</a>一些能跑的东西怎么样?这就是OutRunner现在在干的事。 <a href="http://leiphone.qiniudn.com/uploads/2014/05/outrunner.png"><img alt="outrunner" src="http://leiphone.qiniudn.com/uploads/2014/05/outrunner.png" width="630" height="420" data-pinit="registered" /></a> OutRunner声称自己是世界上第一台可以奔跑的<a href="http://www.leiphone.com/flying-3d-printer-robot.html" target="_blank">遥控机器人</a>,你可以花不到250美元在kickstarter上众筹一台。这个可爱的机器人是由一家独立新公司Robotics Unlimited推出的,这家公司集机器人、电子和机械设计于一身。 其实,会跑的机器人并不少见,但是这个机器人的奔跑方式却很特别,它是依靠多条“长腿”滚动前行的,有点像古代的水车或是现在的摩天轮。 <a href="http://leiphone.qiniudn.com/uploads/2014/05/outrunner-1.jpg"><img alt="outrunner-1" src="http://leiphone.qiniudn.com/uploads/2014/05/outrunner-1.jpg" width="700" height="467" data-pinit="registered" /></a> Robotics Unlimited公司在kickstarter上说道:“通过使OutRunner的重心低于旋转腿轴心,利用浮力效应,机器人在前进的时候能够获得固有的稳定性,减少对价格昂贵的传感器和复杂控制算法的需求。” 当然,机器人可以通过旋转其重心来被操控。据说,已生产的机器人版本可以从静止开始奔跑,尽管视频里显示只能运动中开始。 <a href="http://leiphone.qiniudn.com/uploads/2014/05/outrunner-2.png"><img alt="outrunner-2" src="http://leiphone.qiniudn.com/uploads/2014/05/outrunner-2.png" width="630" height="445" data-pinit="registered" /></a> Robotics Unlimited公司计划推出两种版本的OutRunner机器人,分别为OutRunner Core和OutRunner Performance。OutRunner 
Core会是一种基础款,采用的是塑料材质,拥有6条“长腿”。其速度可达16km/h,电池续航能力为1小时,另外装备了一个万能相机底座。这款机器人是由一个标准的远程控制装置控制的,如果你只是想和机器人一起玩,那么这种版本非常适合你。 另一方面,OutRunner Performance是专为发烧友定制,采用了最先进的传感器。其速度可达32km/h,电池续航能力为2小时,它配备了一个720p像素的车载摄像头,可通过Wi-Fi实时传输视频。这个版本可由一个远程控制装置或一个配对的智能手机app来控制,用户可以根据自身需求增加“长腿”数量,多至12条。 另外,每条腿都有一个内置的减震器,使<a href="http://www.leiphone.com/d-wrtnode-machine-team.html" target="_blank">机器人</a>相当稳定。如果你对这个机器人确实很感兴趣的话,笔者可以告诉你,目前,这款产品正在Kickstarter上进行筹款,Core和Performance的售价分别为249美元和799美元。即使Performance这款看起来还挺贵的,不过还是比预计的零售价格便宜200美元。 笔者看完视频后觉得这个机器人还蛮好玩的,长得像鲨鱼的嘴巴。这种车轮式的转动,比单独两条腿或是两个轮子行走要稳定许多。 <iframe src="https://www.kickstarter.com/projects/138364285/outrunner-the-worlds-most-advanced-running-robot/widget/video.html" height="480" width="640" frameborder="0" scrolling="no"></iframe> </div> <div class="d-fav"> <a href="javascript:;" class="d-fav-like"><span class="d-fav-icon" id="dLikeHash"></span><span class="d-fav-count">19</span></a> <span class="d-share clearfix"> <span class="bdsharebuttonbox"><a href="#" class="bds_more d-s-icon" data-cmd="more"><i></i>分享</a></span> </span> </div> <div class="d-link clearfix"> <div class="d-prev"> <span>上一篇:</span><a href="/news/2007.html" target="_blank">天气app涉足智能硬件——墨迹空气果首发体验</a> </div> <div class="d-next"> <span>下一篇:</span><a href="/news/2011.html" target="_blank">不怕晒,晒不怕!Smartsun腕带可测紫外线强度</a> </div> </div> <div class="d-sort-comment" id="comment">短评<span>(</span><span id="ctCount">0</span><span>)</span></div> <form action="/news/api/addComment" method="POST " class="ct-form" name="ct-form" data-hostid="2009" data-committype="" data-hosttype="news"> <div class="clearfix"><textarea name="comment" aria-required="true" placeholder="同学,你怎么看?"></textarea></div> <div class="ct-submit"> <span class="ct-count"><span class="prefix">还能输入</span>&nbsp;<strong class="ct-limit">150</strong>&nbsp;个字</span><button type="submit" class="btn btn-primary">提交</button> </div> </form> <ul class="ct-list" id="ct-list-full"> <li class="empty hide">还木有评论哦,快来抢沙发吧~~</li> </ul> 
<div id="ct-page"></div> <script type="text/template" id="ct-tpl"> <% $.each(list, function(idx, item) { %> <li class="clearfix"> <div class="avatar"><img src="<%- item.user_icon %>" width="60" height="60" title="<%- item.user_name %>"></div> <div class="cont"> <div class="ut"> <span class="uname text-overflow"><%- item.user_name %></span><span class="date"><%- item.create_time %></span> </div> <%if(item.parent) { %> <div class="quote"> <div class="uname">@&nbsp;<span><%- item.parent.user_name %></span></div> <div class="qct"><%- item.parent.content %></div> </div> <% } %> <div class="ct"><%- item.content %></div> <%if(!item.parent) { %> <div class="tb"><a href="#" data-pid="<%- item.reply_id %>"><i></i>回复</a></div> <% } %> </div> </li> <% }); %> </script> </div> </div> </div> <script> window._bd_share_config={ "common":{"bdSnsKey":{}, "bdText":"", "bdMini":"1", "bdMiniList":["tsina","qzone","weixin","renren","tqq","douban","sqq"], "bdPic":"", "bdStyle":"0", "bdSize":"32", },"share":{} }; with(document)0[(getElementsByTagName('head')[0]||body).appendChild(createElement('script')).src='http://bdimg.share.baidu.com/static/api/js/share.js?v=86835285.js?cdnversion='+~(-new Date()/36e5)]; </script> <script type="text/template" id="hotRecommend"> <%if(!_.isEmpty(list)){ %> <ul class="d-h-r-c clearfix"> <% $.each(list, function(idx, item) { %> <li <%if(idx<3){ %> class="d-h-top3"<% } %>> <span class="d-h-r-c-no"><%-idx+1%></span> <a href="/news/<%-item.id%>.html" target="_blank"><%-item.title%></a> </li> <% }); %> </ul> <% } %> </script> <script type="text/template" id="guessLike"> <%if(!_.isEmpty(list)){ %> <ul> <% $.each(list, function(idx, item) { %> <li class="clearfix"> <a href="/product/view/<%-item.product_id%>.html" class="d-thumbnail" style="background-image:url(<%-item.product_cover_img%>) " target="_blank"> </a> <div class="d-g-l-c"> <a href="/product/view/<%-item.product_id%>.html" class="d-g-l-c-title" target="_blank"><%-item.product_name%></a> <a 
href="/product/view/<%-item.product_id%>.html#evaluation" class="d-g-l-c-write" target="_blank">写评测</a> </div> </li> <% }); %> </ul> <% } %> </script> <script type="text/template" id="guessLikeNews"> <%if(!_.isEmpty(list)){ %> <ul> <% $.each(list, function(idx, item) { %> <li class="clearfix"> <a href="/news/<%-item.id%>.html" target="_blank" class="d-thumbnail" style="background-image:url(<%-item.thumbnails%>);" > </a> <div class="d-g-l-c"> <a href="/news/<%-item.id%>.html" target="_blank" class="d-g-l-c-title"><%-item.title%></a> <a href="/news/<%-item.id%>.html#comment" target="_blank" class="d-g-l-c-count"><%-item.comment_count%></a> </div> </li> <% }); %> </ul> <% } %> </script> <script type="text/template" id="bestRecommend"> <%if(!_.isEmpty(list)){ %> <div class="d-b-r-head clearfix"> <div class="d-b-r-operator"> <% $.each(list, function(idx, item) { %> <a href="javascript:;" class="d-b-r-o-icon <%if(idx==0){ %> active <% } %>"></a> <% }); %> </div> <div class="d-b-r-title">精品推荐</div></div> <div class="d-b-r-content clearfix"> <% $.each(list, function(idx, item) { %> <div class="d-b-r-c"> <a href="/product/view/<%-item.product_id%>.html" target="_blank"><img src="<%-item.product_cover_img%>"></a> <a href="/product/view/<%-item.product_id%>.html" target="_blank" class="d-b-r-c-desc"> <%-item.product_name%> </a> </div> <% }); %> </div> <% } %> </script> <div class="footer clearfix"> <p class="fl-l">©2014 baidu<a href="http://home.baidu.com/" target="_blank">关于百度</a> <a href="/about" target="_blank">关于我们</a> <a href="http://www.baidu.com/duty/" target="_blank">使用前必读</a> <a href="/admin/index" target="_blank" class="hide" id="admin-entry">管理</a> <a href="/product/uscreateProduct?product_id=" target="_blank" class="hide" id="admin-product-edit">编辑</a> <a href="/product/usprovision" class="hide" id="admin-provision">服务条款</a> </p> <div class="fl-r link"> <a href="http://weibo.com/dulife" target="_blank" class="sina icon">新浪微博</a><a href="https://wx.qq.com/" 
target="_blank" class="weixin icon" style="visibility:hidden;">微信</a> </div> </div> <div class="backtop"></div> </body><script type="text/javascript" src="/static/common/pkg/common_56f87e5.js"></script> <script type="text/javascript" src="/static/discovery/pkg/discovery_baf8100.js"></script> <script type="text/javascript">!function(){var cookieName = 'duin_ie_tip'; if (document.cookie.indexOf(cookieName) === -1) { $('.goodbye-modal').show(); $('.goodbye-ie').show(); } $('#iknow').click(function() { $('.goodbye-modal').hide(); $('.goodbye-ie').hide(); document.cookie = cookieName + (+new Date); }); }(); !function(){ var href = location.href; $('.nav-login').click(function(e) { e.preventDefault(); duin.login(); }); $('.nav-logout').attr('href', 'http://passport.baidu.com?logout&tpl=mco_web&u=' + href); }(); !function(){require('common:widget/nav/nav.js'); duin.nav.searchFuc(); duin.nav.adjustWidth(); }(); !function(){require('common:widget/footer/footer.js'); }(); !function(){try { if (console && console.log) { console.log('%c', 'padding:12px 59px;line-height:60px;'); console.log('\n想挑战规模够大、协作够复杂的前端团队协作?\n' + '想寻求理念够新、自由度够高的前端成长空间?\n' + '想把前端做成极富创造力、极富成就感的一份工作?\n' + 'or 前端不仅仅是一份工作,而是你的理想和全部?\n\n' + '快到碗里来!\n' + '有些事,我们不做,就没人做了!\n' + '你不来,我们就没办法一起快乐玩耍了!\n' + '学历、年龄、性别、取向、节操通通不限,产品/代码洁癖患者优先考虑。\n'); console.info && console.info('请将简历发送至 %c [email protected]( 邮件标题请以“姓名-前端-来自百度未来商店”命名)', 'color:red'); console.info && console.info('职位介绍:http://dwz.cn/mbufe'); } } catch (e) {} }(); !function(){ window.modifyTime = duin.dateUtil.format('yyyy-MM-dd HH:mm:ss', new Date(parseInt('1399860098')*1000)).substring(0, 10); }(); !function(){ jQuery(function() { alog('speed.set', 'drt', +new Date); }); }();</script><div class="hide"> <script type="text/javascript"> var _bdhmProtocol = (("https:" == document.location.protocol) ? 
" https://" : " http://"); document.write(unescape("%3Cscript src='" + _bdhmProtocol + "hm.baidu.com/h.js%3F0696c093b1f3ccb4153ab2e25fa7f411' type='text/javascript'%3E%3C/script%3E")); </script> </div> <div class="hide"> <script type="text/javascript"> var _bdhmProtocol = (("https:" == document.location.protocol) ? " https://" : " http://"); document.write(unescape("%3Cscript src='" + _bdhmProtocol + "hm.baidu.com/h.js%3F3d1ae725881ab60e9d4d62efe134498c' type='text/javascript'%3E%3C/script%3E")); </script> </div> <div class="hide"> <script type="text/javascript"> var _bdhmProtocol = (("https:" == document.location.protocol) ? " https://" : " http://"); document.write(unescape("%3Cscript src='" + _bdhmProtocol + "hm.baidu.com/h.js%3F91e35cba3b09a5afffa12b0db4325bee' type='text/javascript'%3E%3C/script%3E")); </script> </div> </html><!--15460008570507075594111817--> <script> var _trace_page_logid = 1546000857; </script><!--15460209550824728586111817--> <script> var _trace_page_logid = 1546020955; </script>
apache-2.0
googleads/google-ads-python
google/ads/googleads/v9/enums/types/simulation_type.py
1302
# -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import proto # type: ignore __protobuf__ = proto.module( package="google.ads.googleads.v9.enums", marshal="google.ads.googleads.v9", manifest={"SimulationTypeEnum",}, ) class SimulationTypeEnum(proto.Message): r"""Container for enum describing the field a simulation modifies. """ class SimulationType(proto.Enum): r"""Enum describing the field a simulation modifies.""" UNSPECIFIED = 0 UNKNOWN = 1 CPC_BID = 2 CPV_BID = 3 TARGET_CPA = 4 BID_MODIFIER = 5 TARGET_ROAS = 6 PERCENT_CPC_BID = 7 TARGET_IMPRESSION_SHARE = 8 BUDGET = 9 __all__ = tuple(sorted(__protobuf__.manifest))
apache-2.0
google-code-export/google-api-dfp-java
src/com/google/api/ads/dfp/v201306/DeactivateLabels.java
2601
/**
 * DeactivateLabels.java
 *
 * This file was auto-generated from WSDL
 * by the Apache Axis 1.4 Apr 22, 2006 (06:55:48 PDT) WSDL2Java emitter.
 */

// NOTE: generated code — regenerate from the WSDL rather than hand-editing.
package com.google.api.ads.dfp.v201306;


/**
 * The action used for deactivating {@link Label} objects.
 */
public class DeactivateLabels  extends com.google.api.ads.dfp.v201306.LabelAction  implements java.io.Serializable {
    public DeactivateLabels() {
    }

    public DeactivateLabels(
           java.lang.String labelActionType) {
        super(
            labelActionType);
    }

    // Cycle guard used by the Axis-generated equals(): marks the object currently
    // being compared so self-referential object graphs don't recurse forever.
    private java.lang.Object __equalsCalc = null;
    public synchronized boolean equals(java.lang.Object obj) {
        if (!(obj instanceof DeactivateLabels)) return false;
        DeactivateLabels other = (DeactivateLabels) obj;
        // dead code: instanceof above already returned false for null
        if (obj == null) return false;
        if (this == obj) return true;
        if (__equalsCalc != null) {
            return (__equalsCalc == obj);
        }
        __equalsCalc = obj;
        boolean _equals;
        _equals = super.equals(obj);
        __equalsCalc = null;
        return _equals;
    }

    // Cycle guard for hashCode(), mirroring __equalsCalc above.
    private boolean __hashCodeCalc = false;
    public synchronized int hashCode() {
        if (__hashCodeCalc) {
            return 0;
        }
        __hashCodeCalc = true;
        int _hashCode = super.hashCode();
        __hashCodeCalc = false;
        return _hashCode;
    }

    // Type metadata
    private static org.apache.axis.description.TypeDesc typeDesc =
        new org.apache.axis.description.TypeDesc(DeactivateLabels.class, true);

    static {
        typeDesc.setXmlType(new javax.xml.namespace.QName("https://www.google.com/apis/ads/publisher/v201306", "DeactivateLabels"));
    }

    /**
     * Return type metadata object
     */
    public static org.apache.axis.description.TypeDesc getTypeDesc() {
        return typeDesc;
    }

    /**
     * Get Custom Serializer
     */
    public static org.apache.axis.encoding.Serializer getSerializer(
           java.lang.String mechType,
           java.lang.Class _javaType,
           javax.xml.namespace.QName _xmlType) {
        return
          new  org.apache.axis.encoding.ser.BeanSerializer(
            _javaType, _xmlType, typeDesc);
    }

    /**
     * Get Custom Deserializer
     */
    public static org.apache.axis.encoding.Deserializer getDeserializer(
           java.lang.String mechType,
           java.lang.Class _javaType,
           javax.xml.namespace.QName _xmlType) {
        return
          new  org.apache.axis.encoding.ser.BeanDeserializer(
            _javaType, _xmlType, typeDesc);
    }

}
apache-2.0
topie/topie-oa
src/main/java/com/topie/asset/AssetModuleSpecification.java
1430
package com.topie.asset; import com.topie.core.dbmigrate.ModuleSpecification; import org.springframework.beans.factory.annotation.Value; import org.springframework.stereotype.Component; @Component public class AssetModuleSpecification implements ModuleSpecification { private static final String MODULE_NAME = "asset"; private static final String MODULE_NAME_UPPER = MODULE_NAME.toUpperCase(); private String type; private boolean enabled; private boolean initData; public boolean isEnabled() { return enabled; } public String getSchemaTable() { return "SCHEMA_VERSION_" + MODULE_NAME_UPPER; } public String getSchemaLocation() { return "dbmigrate." + type + "." + MODULE_NAME; } public boolean isInitData() { return initData; } public String getDataTable() { return "SCHEMA_VERSION_DATA_" + MODULE_NAME_UPPER; } public String getDataLocation() { return "dbmigrate." + type + ".data_" + MODULE_NAME; } @Value("${application.database.type}") public void setType(String type) { this.type = type; } @Value("${" + MODULE_NAME + ".dbmigrate.enabled}") public void setEnabled(boolean enabled) { this.enabled = enabled; } @Value("${" + MODULE_NAME + ".dbmigrate.initData}") public void setInitData(boolean initData) { this.initData = initData; } }
apache-2.0
AlexEndris/regtesting
RegTesting.Node/NodeLogic.cs
6729
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.IO;
using System.Linq;
using System.Net;
using System.Threading;
using System.Threading.Tasks;
using AutoMapper;
using RegTesting.Contracts;
using RegTesting.Contracts.Domain;
using RegTesting.Contracts.DTO;
using RegTesting.Tests.Core;
using Browser = RegTesting.Tests.Core.Browser;

namespace RegTesting.Node
{
	/// <summary>
	/// Worker loop of a test node: registers itself at the server, polls for work
	/// items, downloads the matching test DLL, runs the testcase locally and
	/// reports the result back over WCF.
	/// </summary>
	class NodeLogic
	{
		/// <summary>
		/// The types for a testfile
		/// </summary>
		public string[] Types { get; set; }

		private readonly string _serverAdr;
		private readonly string _nodename;
		private TestcaseProvider _testcaseProvider;
		private readonly List<string> _browsers;
		// Milliseconds between GetWork polls; set once from configuration.
		private readonly int _pollingIntervall;

		/// <summary>Creates a node worker.</summary>
		/// <param name="serverAdr">address of the regtesting server</param>
		/// <param name="nodeName">unique name this node registers under</param>
		/// <param name="browsers">browsers this node can run tests with</param>
		public NodeLogic(string serverAdr, string nodeName, List<string> browsers)
		{
			_serverAdr = serverAdr;
			_nodename = nodeName;
			_browsers = browsers;
			_pollingIntervall = NodeConfiguration.PollingIntervall;
		}

		/// <summary>
		/// Main loop: register once, then forever fetch a work item, run it and
		/// send the result back. This method never returns.
		/// </summary>
		public void Run()
		{
			Register();

			do
			{
				EnsureBrowserClosed();
				WorkItemDto workItemDto = WaitForWorkItem();
				WorkItem workItem = Mapper.Map<WorkItem>(workItemDto);
				Console.WriteLine(@"Loading " + workItem.Testsystem.Name);
				UpdateTestcases(workItem.Testsystem);
				Console.WriteLine(@"Received" + workItem.Testsystem.Name);
				TestResult objTestResult = HandleTest(workItem);
				SendTestResultToServer(objTestResult);
				UnloadTestcases();
			} while (true);
		}

		/// <summary>Kills any leftover browser processes from a previous run.</summary>
		private void EnsureBrowserClosed()
		{
			CloseProcesses("iexplore", "firefox");
		}

		private void CloseProcesses(params string[] processNames)
		{
			foreach (Process[] processes in processNames.Select(Process.GetProcessesByName))
			{
				KillAll(processes);
			}
		}

		private void KillAll(IEnumerable<Process> processes)
		{
			foreach (Process process in processes)
			{
				try
				{
					process.Kill();
					process.WaitForExit(1000 * 15);
				}
				catch (Exception)
				{
					/*Could not close Process - But at least we tried*/
				}
			}
		}

		/// <summary>Unloads the AppDomain hosting the downloaded test DLL.</summary>
		private void UnloadTestcases()
		{
			_testcaseProvider.Unload();
		}

		/// <summary>Reports a finished test result back to the server.</summary>
		private void SendTestResultToServer(TestResult testResult)
		{
			Console.Out.WriteLine("Result: " + testResult.TestState);
			using (WcfClient objWcfClient = new WcfClient(_serverAdr))
			{
				objWcfClient.FinishedWork(_nodename, testResult);
			}
			Console.Out.WriteLine("Finished.");
		}

		/// <summary>
		/// Downloads the test DLL for the given testsystem and loads it into a
		/// fresh AppDomain via a new TestcaseProvider.
		/// </summary>
		private void UpdateTestcases(Testsystem testsystem)
		{
			const string testfile = @"LocalTests.dll";
			byte[] data;
			using (WcfClient wcfClient = new WcfClient(_serverAdr))
			{
				data = wcfClient.FetchDll(_nodename, testsystem.Name);
			}
			using (FileStream fileStream = new FileStream(testfile, FileMode.Create, FileAccess.Write))
			{
				fileStream.Write(data, 0, data.Length);
			}
			_testcaseProvider = new TestcaseProvider(testfile);
			_testcaseProvider.CreateAppDomain();
		}

		/// <summary>Resolves the testable instance for the work item's testcase type.</summary>
		private ITestable LoadTestable(WorkItem workItem)
		{
			return _testcaseProvider.GetTestableFromTypeName(workItem.Testcase.Type);
		}

		/// <summary>Polls the server until a work item is available.</summary>
		private WorkItemDto WaitForWorkItem()
		{
			Console.Out.WriteLine("Wait for WorkItem");
			do
			{
				WorkItemDto workItem = FetchWork();
				if (workItem != null)
					return workItem;
				Thread.Sleep(_pollingIntervall);
			} while (true);
		}

		/// <summary>Registers this node and its browsers at the server.</summary>
		private void Register()
		{
			Console.Out.WriteLine("Register at server...");
			using (WcfClient wcfClient = new WcfClient(_serverAdr))
			{
				wcfClient.Register(_nodename, _browsers);
			}
		}

		private WorkItemDto FetchWork()
		{
			using (WcfClient wcfClient = new WcfClient(_serverAdr))
			{
				return wcfClient.GetWork(_nodename);
			}
		}

		/// <summary>
		/// Runs a single work item: loads the testable, waits for the testsystem
		/// to come up, executes the test and maps any exception onto a TestResult
		/// (NotSupported / Canceled / Error, with screenshot and log on failure).
		/// </summary>
		private TestResult HandleTest(WorkItem workItem)
		{
			TestResult testResult = new TestResult();
			ITestable testable = null;
			List<string> log = new List<string>();
			try
			{
				log.Add("Test on " + _nodename);

				/**1: Load Testclass **/
				Console.WriteLine(@"Testing {0} {1} ({2}/{3})", workItem.Testcase.Name, workItem.Browser.Name,
					workItem.Testsystem.Name, workItem.Language.Languagecode);
				testable = LoadTestable(workItem);
				if (testable == null)
					return new TestResult { TestState = TestState.NotAvailable };

				/**2: Wait for branch get ready **/
				WaitOnWebExceptions(workItem);

				/**3: Prepare Test **/
				Browser browser = new Browser()
				{
					Browserstring = workItem.Browser.Browserstring,
					Versionsstring = workItem.Browser.Versionsstring
				};
				testable.SetupTest(WebDriverInitStrategy.SeleniumLocal, browser, workItem.Testsystem.Url,
					workItem.Language.Languagecode);

				/**4: Run Test **/
				testable.Test();

				testResult.TestState = TestState.Success;
			}
			catch (NotSupportedException notSupportedException)
			{
				Error error = CreateErrorFromException(notSupportedException);
				testResult.TestState = TestState.NotSupported;
				testResult.Error = error;
			}
			catch (TaskCanceledException taskCanceledException)
			{
				Error error = CreateErrorFromException(taskCanceledException);
				testResult.TestState = TestState.Canceled;
				testResult.Error = error;
			}
			catch (Exception exception)
			{
				// Prefer a server-side error (if the testable can extract one) over
				// the local exception details, but keep the local stacktrace.
				ServerErrorModel serverException = null;
				try
				{
					if (testable != null)
						serverException = testable.CheckForServerError();
				}
				catch
				{
					//Error catching serverException
				}
				Error error = CreateErrorFromException(exception);
				if (serverException != null)
				{
					error.Type = serverException.Type;
					error.Message = serverException.Message;
					error.InnerException = serverException.InnerException;
					//objError.StackTrace = serverException.StackTrace; Keep error stacktrace.
				}
				testResult.TestState = TestState.Error;
				testResult.Error = error;
				if (testable != null) testResult.Screenshot = testable.SaveScreenshot("");
			}
			finally
			{
				if (testable != null)
				{
					testable.TeardownTest();
					log.AddRange(testable.GetLogLastTime());
				}
				testResult.Log = log;
			}
			return testResult;
		}

		/// <summary>Flattens an exception into the transportable Error DTO.</summary>
		private Error CreateErrorFromException(Exception exception)
		{
			Error error = new Error
			{
				Type = exception.GetType().ToString(),
				Message = exception.Message,
				StackTrace = exception.StackTrace ?? "",
				InnerException = (exception.InnerException != null ? exception.InnerException.ToString() : null),
			};
			return error;
		}

		/// <summary>
		/// Polls the testsystem URL until it responds (up to 10 tries, 10s apart),
		/// so a test doesn't start while a freshly deployed branch is still warming up.
		/// After 10 failed tries the test proceeds anyway (best effort).
		/// </summary>
		private void WaitOnWebExceptions(WorkItem workItem)
		{
			for (int intTryCount = 0; intTryCount < 10; intTryCount++)
			{
				// WebClient is IDisposable; dispose it instead of leaking one per retry.
				using (WebClient webClient = new WebClient())
				{
					try
					{
						webClient.DownloadString("http://" + workItem.Testsystem.Url);
						break;
					}
					catch
					{
						//Catched an exception. Waiting for retry...
						Thread.Sleep(10000);
					}
				}
			}
		}
	}
}
apache-2.0
gorcz/governator
governator-core/src/test/java/com/netflix/governator/lifecycle/CircularDAG.java
1747
package com.netflix.governator.lifecycle; import com.google.inject.Injector; import com.netflix.governator.LifecycleInjectorBuilderProvider; import com.netflix.governator.annotations.WarmUp; import com.netflix.governator.guice.LifecycleInjector; import com.netflix.governator.guice.LifecycleInjectorBuilder; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.testng.annotations.Test; import javax.annotation.PreDestroy; import javax.inject.Inject; import javax.inject.Singleton; /** * There is a infinite recursion in InternalLifecycleModule.warmUpIsInDag(InternalLifecycleModule.java:150) * and InternalLifecycleModule.warmUpIsInDag(InternalLifecycleModule.java:171) that will ultimately lead to * an StackOverflowError. */ public class CircularDAG extends LifecycleInjectorBuilderProvider { @Singleton public static class A { @Inject private B b; } @Singleton public static class B { @Inject private A a; } @Singleton public static class Service { private final Logger log = LoggerFactory.getLogger(getClass()); @Inject private A a; @WarmUp public void connect() { log.info("connect"); } @PreDestroy public void disconnect() { log.info("disconnect"); } } @Test(dataProvider = "builders") public void circle(LifecycleInjectorBuilder lifecycleInjectorBuilder) throws Exception { Injector injector = lifecycleInjectorBuilder.createInjector(); injector.getInstance(Service.class); LifecycleManager manager = injector.getInstance(LifecycleManager.class); manager.start(); } }
apache-2.0
slide-lig/TopPI
src/main/java/fr/liglab/mining/mapred/AggregationMapper.java
1561
/* This file is part of TopPI - see https://github.com/slide-lig/TopPI/ Copyright 2016 Martin Kirchgessner, Vincent Leroy, Alexandre Termier, Sihem Amer-Yahia, Marie-Christine Rousset, Université Grenoble Alpes, LIG, CNRS Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 or see the LICENSE.txt file joined with this program. Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package fr.liglab.mining.mapred; import java.io.IOException; import org.apache.hadoop.io.IntWritable; import org.apache.hadoop.mapreduce.Mapper; import fr.liglab.mining.mapred.writables.ItemAndSupportWritable; import fr.liglab.mining.mapred.writables.SupportAndTransactionWritable; public class AggregationMapper extends Mapper<IntWritable, SupportAndTransactionWritable, ItemAndSupportWritable, SupportAndTransactionWritable> { private final ItemAndSupportWritable keyW = new ItemAndSupportWritable(); @Override protected void map(IntWritable key, SupportAndTransactionWritable value, Context context) throws IOException, InterruptedException { keyW.set(key.get(), value.getSupport()); context.write(this.keyW, value); } }
apache-2.0
michaelkrog/cordova-plugin-geolocation-x
src/android/dk/apaq/cordova/geolocationx/LocationUpdateService.java
11318
package dk.apaq.cordova.geolocationx; import de.greenrobot.event.EventBus; import java.util.List; import java.util.Iterator; import java.util.Date; import org.json.JSONException; import org.json.JSONObject; import android.annotation.TargetApi; import android.app.NotificationManager; import android.app.Notification; import android.app.PendingIntent; import android.app.Service; import android.content.Context; import android.content.Intent; import android.content.IntentFilter; import android.location.Criteria; import android.location.Location; import android.location.LocationListener; import android.location.LocationManager; import android.os.Build; import android.os.Bundle; import android.os.IBinder; import android.util.Log; import android.widget.Toast; import static java.lang.Math.*; public class LocationUpdateService extends Service implements LocationListener { private static final String TAG = "LocationUpdateService"; public static final String ACTION_START = "dk.apaq.cordova.geolocationx.START"; public static final String ACTION_STOP = "dk.apaq.cordova.geolocationx.STOP"; public static final String ACTION_CONFIGURE = "dk.apaq.cordova.geolocationx.CONFIGURE"; public static final String ACTION_SET_MINIMUM_DISTANCE = "dk.apaq.cordova.geolocationx.SET_MINIMUM_DISTANCE"; public static final String ACTION_SET_MINIMUM_INTERVAL = "dk.apaq.cordova.geolocationx.SET_MINIMUM_INTERVAL"; public static final String ACTION_SET_PRECISION = "dk.apaq.cordova.geolocationx.SET_PRECISION"; private static final int TWO_MINUTES = 1000 * 60 * 2; private Location lastLocation; private Boolean isDebugging = false; private String notificationTitle = ""; private String notificationText = ""; private Long locationTimeout; private String activityType; private LocationManager locationManager; private NotificationManager notificationManager; @Override public IBinder onBind(Intent intent) { Log.i(TAG, "OnBind" + intent); return null; } @Override public void onCreate() { super.onCreate(); 
Log.i(TAG, "OnCreate"); locationManager = (LocationManager)this.getSystemService(Context.LOCATION_SERVICE); notificationManager = (NotificationManager)this.getSystemService(Context.NOTIFICATION_SERVICE); } @Override public int onStartCommand(Intent intent, int flags, int startId) { Log.i(TAG, "Received start id " + startId + ": " + intent); if (intent != null) { Log.d(TAG, "Action: " + intent.getAction()); // debug intent values values Bundle bundle = intent.getExtras(); if(bundle != null) { for (String key : bundle.keySet()) { Object value = bundle.get(key); Log.d(TAG, String.format("%s %s (%s)", key, value.toString(), value.getClass().getName())); } } if(intent.getAction().equals(ACTION_START)) { this.startRecording(); // Build a Notification required for running service in foreground. Intent main = new Intent(this, BackgroundGpsPlugin.class); main.setFlags(Intent.FLAG_ACTIVITY_CLEAR_TOP | Intent.FLAG_ACTIVITY_SINGLE_TOP); PendingIntent pendingIntent = PendingIntent.getActivity(this, 0, main, PendingIntent.FLAG_UPDATE_CURRENT); Notification.Builder builder = new Notification.Builder(this); builder.setContentTitle(notificationTitle); builder.setContentText(notificationText); builder.setSmallIcon(android.R.drawable.ic_menu_mylocation); builder.setContentIntent(pendingIntent); Notification notification; notification = builder.build(); notification.flags |= Notification.FLAG_ONGOING_EVENT | Notification.FLAG_FOREGROUND_SERVICE | Notification.FLAG_NO_CLEAR; startForeground(startId, notification); } if(intent.getAction().equals(ACTION_CONFIGURE)) { locationTimeout = Long.parseLong(intent.getStringExtra("locationTimeout")); isDebugging = Boolean.parseBoolean(intent.getStringExtra("isDebugging")); notificationTitle = intent.getStringExtra("notificationTitle"); notificationText = intent.getStringExtra("notificationText"); activityType = intent.getStringExtra("activityType"); Log.i(TAG, "- notificationTitle: " + notificationTitle); Log.i(TAG, "- notificationText: " + 
notificationText); } if(intent.getAction().equals(ACTION_SET_MINIMUM_DISTANCE)) { // TODO Log.i(TAG, "- minimumDistance: " + intent.getStringExtra("value")); } if(intent.getAction().equals(ACTION_SET_MINIMUM_INTERVAL)) { // TODO Log.i(TAG, "- minimumInterval: " + intent.getStringExtra("value")); } if(intent.getAction().equals(ACTION_SET_PRECISION)) { // TODO Log.i(TAG, "- precision: " + intent.getStringExtra("value")); } } //We want this service to continue running until it is explicitly stopped return START_REDELIVER_INTENT; } @Override public void onDestroy() { Log.w(TAG, "------------------------------------------ Destroyed Location update Service"); cleanUp(); super.onDestroy(); } @TargetApi(Build.VERSION_CODES.ICE_CREAM_SANDWICH) @Override public void onTaskRemoved(Intent rootIntent) { this.stopSelf(); super.onTaskRemoved(rootIntent); } @Override public boolean stopService(Intent intent) { Log.i(TAG, "- Received stop: " + intent); cleanUp(); if (isDebugging) { Toast.makeText(this, "Background location tracking stopped", Toast.LENGTH_SHORT).show(); } return super.stopService(intent); } /** * Start recording aggresively from all found providers */ private void startRecording() { Log.i(TAG, "startRecording"); locationManager.removeUpdates(this); // Turn on all providers aggressively List<String> matchingProviders = locationManager.getAllProviders(); for (String provider: matchingProviders) { if (provider != LocationManager.PASSIVE_PROVIDER) { locationManager.requestLocationUpdates(provider, 0, 0, this); } } } private void cleanUp() { locationManager.removeUpdates(this); stopForeground(true); } /** Determines whether one Location reading is better than the current Location fix * @param location The new Location that you want to evaluate * @param currentBestLocation The current Location fix, to which you want to compare the new one */ protected boolean isBetterLocation(Location location, Location currentBestLocation) { if (currentBestLocation == null) { // A new 
location is always better than no location return true; } // Check whether the new location fix is newer or older long timeDelta = location.getTime() - currentBestLocation.getTime(); boolean isSignificantlyNewer = timeDelta > TWO_MINUTES; boolean isSignificantlyOlder = timeDelta < -TWO_MINUTES; boolean isNewer = timeDelta > 0; // If it's been more than two minutes since the current location, use the new location // because the user has likely moved if (isSignificantlyNewer) { return true; // If the new location is more than two minutes older, it must be worse } else if (isSignificantlyOlder) { return false; } // Check whether the new location fix is more or less accurate int accuracyDelta = (int) (location.getAccuracy() - currentBestLocation.getAccuracy()); boolean isLessAccurate = accuracyDelta > 0; boolean isMoreAccurate = accuracyDelta < 0; boolean isSignificantlyLessAccurate = accuracyDelta > 200; // Check if the old and new location are from the same provider boolean isFromSameProvider = isSameProvider(location.getProvider(), currentBestLocation.getProvider()); // Determine location quality using a combination of timeliness and accuracy if (isMoreAccurate) { return true; } else if (isNewer && !isLessAccurate) { return true; } else if (isNewer && !isSignificantlyLessAccurate && isFromSameProvider) { return true; } return false; } /** Checks whether two providers are the same */ private boolean isSameProvider(String provider1, String provider2) { if (provider1 == null) { return provider2 == null; } return provider1.equals(provider2); } // ------------------ LOCATION LISTENER INTERFACE ------------------------- public void onLocationChanged(Location location) { Log.d(TAG, "- onLocationChanged: " + location.getLatitude() + "," + location.getLongitude() + ", accuracy: " + location.getAccuracy() + ", speed: " + location.getSpeed()); if(isDebugging){ Toast.makeText(this, "acy:"+location.getAccuracy()+",v:"+location.getSpeed(), Toast.LENGTH_LONG).show(); } 
if(isBetterLocation(location, lastLocation)){ Log.d(TAG, "Location is better"); lastLocation = location; // send it via bus to activity try{ JSONObject pos = new JSONObject(); JSONObject loc = new JSONObject(); loc.put("latitude", location.getLatitude()); loc.put("longitude", location.getLongitude()); loc.put("accuracy", location.getAccuracy()); loc.put("speed", location.getSpeed()); loc.put("bearing", location.getBearing()); loc.put("altitude", location.getAltitude()); pos.put("coords", loc); pos.put("timestamp", new Date().getTime()); EventBus.getDefault().post(pos); Log.d(TAG, "posting location to bus"); }catch(JSONException e){ Log.e(TAG, "could not parse location"); } }else{ Log.d(TAG, "Location is no better than current"); } } public void onProviderDisabled(String provider) { // TODO Auto-generated method stub Log.d(TAG, "- onProviderDisabled: " + provider); } public void onProviderEnabled(String provider) { // TODO Auto-generated method stub Log.d(TAG, "- onProviderEnabled: " + provider); } public void onStatusChanged(String provider, int status, Bundle extras) { // TODO Auto-generated method stub Log.d(TAG, "- onStatusChanged: " + provider + ", status: " + status); } // -------------------------- LOCATION LISTENER INTERFACE END ------------------------- }
apache-2.0
Jason0204/jasontek_f103rb-zephyrOS-project
samples/net/zperf/src/zperf_shell.c
24559
/* * Copyright (c) 2016 Intel Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #define SYS_LOG_DOMAIN "net/zperf" #include <ctype.h> #include <stdio.h> #include <stdlib.h> #include <zephyr.h> #include <misc/printk.h> #include <misc/shell.h> #include <net/net_ip.h> #include <net/net_core.h> #include "zperf.h" #include "zperf_internal.h" #include "shell_utils.h" #include "zperf_session.h" /* Get some useful debug routings from net_private.h, requires * that NET_DEBUG is set. */ #define NET_DEBUG 1 #include "net_private.h" #include "ipv6.h" /* to get infinite lifetime */ #define DEVICE_NAME "zperf shell" static const char *CONFIG = "unified" #if defined(CONFIG_WLAN) " wlan" #endif #if defined(CONFIG_ETHERNET) " ethernet" #endif #if defined(CONFIG_NET_IPV4) " ipv4" #endif #if defined(CONFIG_NET_IPV6) " ipv6" #endif ""; #if defined(PROFILER) #include "profiler.h" #endif #define MY_SRC_PORT 50000 #define DEF_PORT 5001 #if defined(CONFIG_NET_IPV6) static struct in6_addr ipv6; #endif static struct sockaddr_in6 in6_addr_my = { .sin6_family = AF_INET6, .sin6_port = htons(MY_SRC_PORT), }; static struct sockaddr_in6 in6_addr_dst = { .sin6_family = AF_INET6, .sin6_port = htons(DEF_PORT), }; struct sockaddr_in6 *zperf_get_sin6(void) { return &in6_addr_my; } #if defined(CONFIG_NET_IPV4) static struct in_addr ipv4; #endif static struct sockaddr_in in4_addr_my = { .sin_family = AF_INET, .sin_port = htons(MY_SRC_PORT), }; static struct sockaddr_in in4_addr_dst = { .sin_family = 
AF_INET, .sin_port = htons(DEF_PORT), }; struct sockaddr_in *zperf_get_sin(void) { return &in4_addr_my; } #if defined(CONFIG_NET_IPV6) int zperf_get_ipv6_addr(char *host, char *prefix_str, struct in6_addr *addr, const char *str) { struct net_if_ipv6_prefix *prefix; struct net_if_addr *ifaddr; int prefix_len; int ret; if (!host) { return -EINVAL; } ret = net_addr_pton(AF_INET6, host, (struct sockaddr *)addr); if (ret < 0) { return -EINVAL; } prefix_len = strtoul(prefix_str, NULL, 10); ifaddr = net_if_ipv6_addr_add(net_if_get_default(), addr, NET_ADDR_MANUAL, 0); if (!ifaddr) { printk("[%s] Error! Cannot set IPv6 address\n", str); return -EINVAL; } prefix = net_if_ipv6_prefix_add(net_if_get_default(), addr, prefix_len, NET_IPV6_ND_INFINITE_LIFETIME); if (!prefix) { printk("[%s] Error! Cannot set IPv6 prefix\n", str); return -EINVAL; } return 0; } #endif #if defined(CONFIG_NET_IPV4) int zperf_get_ipv4_addr(char *host, struct in_addr *addr, const char *str) { struct net_if_addr *ifaddr; int ret; if (!host) { return -EINVAL; } ret = net_addr_pton(AF_INET, host, (struct sockaddr *)addr); if (ret < 0) { return -EINVAL; } ifaddr = net_if_ipv4_addr_add(net_if_get_default(), addr, NET_ADDR_MANUAL, 0); if (!ifaddr) { printk("[%s] Error! Cannot set IPv4 address\n", str); return -EINVAL; } return 0; } #endif static int shell_cmd_setip(int argc, char *argv[]) { int start = 0; if (!strcmp(argv[0], "zperf")) { start++; argc--; } #if defined(CONFIG_NET_IPV6) && !defined(CONFIG_NET_IPV4) if (argc != 3) { /* Print usage */ printk("\n%s:\n", CMD_STR_SETIP); printk("Usage:\t%s <my ip> <prefix len>\n", CMD_STR_SETIP); printk("\nExample %s 2001:db8::2 64\n", CMD_STR_SETIP); return -1; } if (zperf_get_ipv6_addr(argv[start + 1], argv[start + 2], &ipv6, CMD_STR_SETIP) < 0) { printk("[%s] ERROR! 
Unable to set IP\n", CMD_STR_SETIP); return 0; } printk("[%s] Setting IP address %s\n", CMD_STR_SETIP, net_sprint_ipv6_addr(&ipv6)); #endif #if defined(CONFIG_NET_IPV4) && !defined(CONFIG_NET_IPV6) if (argc != 2) { /* Print usage */ printk("\n%s:\n", CMD_STR_SETIP); printk("Usage:\t%s <my ip>\n", CMD_STR_SETIP); printk("\nExample %s 10.237.164.178\n", CMD_STR_SETIP); return -1; } if (zperf_get_ipv4_addr(argv[start + 1], &ipv4, CMD_STR_SETIP) < 0) { printk("[%s] ERROR! Unable to set IP\n", CMD_STR_SETIP); return 0; } printk("[%s] Setting IP address %s\n", CMD_STR_SETIP, net_sprint_ipv4_addr(&ipv4)); #endif #if defined(CONFIG_NET_IPV6) && defined(CONFIG_NET_IPV4) if (net_addr_pton(AF_INET6, argv[start + 1], (struct sockaddr *)&ipv6) < 0) { if (argc != 2) { /* Print usage */ printk("\n%s:\n", CMD_STR_SETIP); printk("Usage:\t%s <my ip>\n", CMD_STR_SETIP); printk("\nExample %s 10.237.164.178\n", CMD_STR_SETIP); printk("Example %s 2001:db8::1 64\n", CMD_STR_SETIP); return -1; } if (zperf_get_ipv4_addr(argv[start + 1], &ipv4, CMD_STR_SETIP) < 0) { printk("[%s] ERROR! Unable to set IP\n", CMD_STR_SETIP); return 0; } printk("[%s] Setting IP address %s\n", CMD_STR_SETIP, net_sprint_ipv4_addr(&ipv4)); } else { if (argc != 3) { /* Print usage */ printk("\n%s:\n", CMD_STR_SETIP); printk("Usage:\t%s <my ip> <prefix len>\n", CMD_STR_SETIP); printk("\nExample %s 2001:db8::2 64\n", CMD_STR_SETIP); printk("Example %s 10.237.164.178\n", CMD_STR_SETIP); return -1; } if (zperf_get_ipv6_addr(argv[start + 1], argv[start + 2], &ipv6, CMD_STR_SETIP) < 0) { printk("[%s] ERROR! 
Unable to set IP\n", CMD_STR_SETIP); return 0; } printk("[%s] Setting IP address %s\n", CMD_STR_SETIP, net_sprint_ipv6_addr(&ipv6)); } #endif return 0; } #if defined(CONFIG_NET_UDP) static int shell_cmd_udp_download(int argc, char *argv[]) { static bool udp_stopped = true; int port, start = 0; if (!strcmp(argv[0], "zperf")) { start++; argc--; } if (argc == 1) { /* Print usage */ printk("\n%s:\n", CMD_STR_UDP_DOWNLOAD); printk("Usage:\t%s <port>\n", CMD_STR_UDP_DOWNLOAD); printk("\nExample %s 5001\n", CMD_STR_UDP_DOWNLOAD); return -1; } if (argc > 1) { port = strtoul(argv[start + 1], NULL, 10); } else { port = DEF_PORT; } if (!udp_stopped) { printk("[%s] ERROR! UDP server already started!\n", CMD_STR_UDP_DOWNLOAD); return -1; } zperf_receiver_init(port); k_yield(); udp_stopped = false; printk("[%s] UDP server started on port %u\n", CMD_STR_UDP_DOWNLOAD, port); return 0; } #endif #if defined(CONFIG_NET_UDP) static void shell_udp_upload_usage(void) { /* Print usage */ printk("\n%s:\n", CMD_STR_UDP_UPLOAD); printk("Usage:\t%s <dest ip> <dest port> <duration> <packet " "size>[K] <baud rate>[K|M]\n", CMD_STR_UDP_UPLOAD); printk("\t<dest ip>:\tIP destination\n"); printk("\t<dest port>:\tUDP destination port\n"); printk("\t<duration>:\tDuration of the test in seconds\n"); printk("\t<packet size>:\tSize of the packet in byte or kilobyte " "(with suffix K)\n"); printk("\t<baud rate>:\tBaudrate in kilobyte or megabyte\n"); printk("\nExample %s 10.237.164.178 1111 1 1K 1M\n", CMD_STR_UDP_UPLOAD); } static void shell_udp_upload2_usage(void) { /* Print usage */ printk("\n%s:\n", CMD_STR_UDP_UPLOAD2); printk("Usage:\t%s v6|v4 <duration> <packet " "size>[K] <baud rate>[K|M]\n", CMD_STR_UDP_UPLOAD2); printk("\t<v6|v4>:\tUse either IPv6 or IPv4\n"); printk("\t<duration>:\tDuration of the test in seconds\n"); printk("\t<packet size>:\tSize of the packet in byte or kilobyte " "(with suffix K)\n"); printk("\t<baud rate>:\tBaudrate in kilobyte or megabyte\n"); printk("\nExample %s v6 1 
1K 1M\n", CMD_STR_UDP_UPLOAD2); #if defined(CONFIG_NET_IPV6) && defined(MY_IPV6ADDR) printk("\nDefault IPv6 address is %s, destination [%s]:%d\n", MY_IP6ADDR, DST_IP6ADDR, DEF_PORT); #endif #if defined(CONFIG_NET_IPV4) && defined(MY_IP4ADDR) printk("\nDefault IPv4 address is %s, destination %s:%d\n", MY_IP4ADDR, DST_IP4ADDR, DEF_PORT); #endif } #endif #if defined(CONFIG_NET_TCP) static void shell_tcp_upload_usage(void) { /* Print usage */ printk("\n%s:\n", CMD_STR_TCP_UPLOAD); printk("Usage:\t%s <dest ip> <dest port> <duration> <packet " "size>[K]\n", CMD_STR_TCP_UPLOAD); printk("\t<dest ip>:\tIP destination\n"); printk("\t<dest port>:\tport destination\n"); printk("\t<duration>:\t of the test in seconds\n"); printk("\t<packet size>:\tSize of the packet in byte or kilobyte " "(with suffix K)\n"); printk("\nExample %s 10.237.164.178 1111 1 1K 1M\n", CMD_STR_TCP_UPLOAD); } #endif #if defined(CONFIG_NET_UDP) static void shell_udp_upload_print_stats(struct zperf_results *results) { unsigned int rate_in_kbps, client_rate_in_kbps; printk("[%s] Upload completed!\n", CMD_STR_UDP_UPLOAD); if (results->time_in_us != 0) { rate_in_kbps = (uint32_t) (((uint64_t)results->nb_bytes_sent * (uint64_t)8 * (uint64_t)USEC_PER_SEC) / ((uint64_t)results->time_in_us * 1024)); } else { rate_in_kbps = 0; } if (results->client_time_in_us != 0) { client_rate_in_kbps = (uint32_t) (((uint64_t)results->nb_packets_sent * (uint64_t)results->packet_size * (uint64_t)8 * (uint64_t)USEC_PER_SEC) / ((uint64_t)results->client_time_in_us * 1024)); } else { client_rate_in_kbps = 0; } if (!rate_in_kbps) { printk("[%s] LAST PACKET NOT RECEIVED!!!\n", CMD_STR_UDP_UPLOAD); } printk("[%s] statistics:\t\tserver\t(client)\n", CMD_STR_UDP_UPLOAD); printk("[%s] duration:\t\t\t", CMD_STR_UDP_UPLOAD); print_number(results->time_in_us, TIME_US, TIME_US_UNIT); printk("\t("); print_number(results->client_time_in_us, TIME_US, TIME_US_UNIT); printk(")\n"); printk("[%s] nb packets:\t\t%u\t(%u)\n", CMD_STR_UDP_UPLOAD, 
results->nb_packets_rcvd, results->nb_packets_sent); printk("[%s] nb packets outorder:\t%u\n", CMD_STR_UDP_UPLOAD, results->nb_packets_outorder); printk("[%s] nb packets lost:\t\t%u\n", CMD_STR_UDP_UPLOAD, results->nb_packets_lost); printk("[%s] jitter:\t\t\t", CMD_STR_UDP_UPLOAD); print_number(results->jitter_in_us, TIME_US, TIME_US_UNIT); printk("\n"); printk("[%s] rate:\t\t\t", CMD_STR_UDP_UPLOAD); print_number(rate_in_kbps, KBPS, KBPS_UNIT); printk("\t("); print_number(client_rate_in_kbps, KBPS, KBPS_UNIT); printk(")\n"); } #endif #if defined(CONFIG_NET_TCP) static void shell_tcp_upload_print_stats(struct zperf_results *results) { unsigned int client_rate_in_kbps; printk("[%s] Upload completed!\n", CMD_STR_TCP_UPLOAD); if (results->client_time_in_us != 0) { client_rate_in_kbps = (uint32_t) (((uint64_t)results->nb_packets_sent * (uint64_t)results->packet_size * (uint64_t)8 * (uint64_t)USEC_PER_SEC) / ((uint64_t)results->client_time_in_us * 1024)); } else { client_rate_in_kbps = 0; } printk("[%s] duration:\t", CMD_STR_TCP_UPLOAD); print_number(results->client_time_in_us, TIME_US, TIME_US_UNIT); printk("\n"); printk("[%s] nb packets:\t%u\n", CMD_STR_UDP_UPLOAD, results->nb_packets_sent); printk("[%s] nb sending errors (retry or fail):\t%u\n", CMD_STR_UDP_UPLOAD, results->nb_packets_errors); printk("[%s] rate:\t", CMD_STR_UDP_UPLOAD); print_number(client_rate_in_kbps, KBPS, KBPS_UNIT); printk("\n"); } #endif static int setup_contexts(struct net_context **context6, struct net_context **context4, sa_family_t family, struct sockaddr_in6 *ipv6, struct sockaddr_in *ipv4, int port, bool is_udp, char *argv0) { int ret; #if defined(CONFIG_NET_IPV6) ret = net_context_get(AF_INET6, is_udp ? SOCK_DGRAM : SOCK_STREAM, is_udp ? 
IPPROTO_UDP : IPPROTO_TCP, context6); if (ret < 0) { printk("[%s] Cannot get IPv6 network context (%d)\n", argv0, ret); return -1; } ipv6->sin6_port = htons(port); ipv6->sin6_family = AF_INET6; #endif #if defined(CONFIG_NET_IPV4) ret = net_context_get(AF_INET, is_udp ? SOCK_DGRAM : SOCK_STREAM, is_udp ? IPPROTO_UDP : IPPROTO_TCP, context4); if (ret < 0) { printk("[%s] Cannot get IPv4 network context (%d)\n", argv0, ret); return -1; } ipv4->sin_port = htons(port); ipv4->sin_family = AF_INET; #endif if (family == AF_INET6 && *context6) { ret = net_context_bind(*context6, (struct sockaddr *)ipv6, sizeof(struct sockaddr_in6)); if (ret < 0) { printk("[%s] Cannot bind IPv6 port %d (%d)", argv0, ntohs(ipv6->sin6_port), ret); return -1; } } if (family == AF_INET && *context4) { ret = net_context_bind(*context4, (struct sockaddr *)ipv4, sizeof(struct sockaddr_in)); if (ret < 0) { printk("[%s] Cannot bind IPv4 port %d (%d)", argv0, ntohs(ipv4->sin_port), ret); return -1; } } if (!(*context6) && !(*context4)) { printk("[%s] ERROR! Fail to retrieve network context(s)\n", argv0); return -1; } return 0; } static int execute_upload(struct net_context *context6, struct net_context *context4, sa_family_t family, struct sockaddr_in6 *ipv6, struct sockaddr_in *ipv4, bool is_udp, char *argv0, unsigned int duration_in_ms, unsigned int packet_size, unsigned int rate_in_kbps) { struct zperf_results results = { }; int ret; printk("[%s] duration:\t\t", argv0); print_number(duration_in_ms * USEC_PER_MSEC, TIME_US, TIME_US_UNIT); printk("\n"); printk("[%s] packet size:\t%u bytes\n", argv0, packet_size); printk("[%s] start...\n", argv0); #if defined(CONFIG_NET_IPV6) if (family == AF_INET6 && context6) { /* For IPv6, we should make sure that neighbor discovery * has been done for the peer. So send ping here, wait * some time and start the test after that. 
*/ net_icmpv6_send_echo_request(net_if_get_default(), &ipv6->sin6_addr, 0, 0); k_sleep(1 * MSEC_PER_SEC); } #endif if (is_udp) { #if defined(CONFIG_NET_UDP) printk("[%s] rate:\t\t", argv0); print_number(rate_in_kbps, KBPS, KBPS_UNIT); printk("\n"); if (family == AF_INET6 && context6) { ret = net_context_connect(context6, (struct sockaddr *)ipv6, sizeof(*ipv6), NULL, K_NO_WAIT, NULL); if (ret < 0) { printk("[%s] IPv6 connect failed (%d)\n", argv0, ret); goto out; } zperf_udp_upload(context6, duration_in_ms, packet_size, rate_in_kbps, &results); shell_udp_upload_print_stats(&results); } if (family == AF_INET && context4) { ret = net_context_connect(context4, (struct sockaddr *)ipv4, sizeof(*ipv4), NULL, K_NO_WAIT, NULL); if (ret < 0) { printk("[%s] IPv4 connect failed (%d)\n", argv0, ret); goto out; } zperf_udp_upload(context4, duration_in_ms, packet_size, rate_in_kbps, &results); shell_udp_upload_print_stats(&results); } #else printk("[%s] UDP not supported\n", argv0); #endif } else { #if defined(CONFIG_NET_TCP) if (context6) { ret = net_context_connect(context6, (struct sockaddr *)ipv6, sizeof(*ipv6), NULL, K_NO_WAIT, NULL); if (ret < 0) { printk("[%s] IPv6 connect failed\n", argv0); goto out; } zperf_tcp_upload(context6, duration_in_ms, packet_size, &results); shell_tcp_upload_print_stats(&results); } if (context4) { ret = net_context_connect(context4, (struct sockaddr *)ipv4, sizeof(*ipv4), NULL, K_NO_WAIT, NULL); if (ret < 0) { printk("[%s] IPv4 connect failed\n", argv0); goto out; } zperf_tcp_upload(context4, duration_in_ms, packet_size, &results); shell_tcp_upload_print_stats(&results); } #else printk("[%s] TCP not supported\n", argv0); #endif } out: net_context_put(context6); net_context_put(context4); return 0; } static int shell_cmd_upload(int argc, char *argv[]) { struct net_context *context6 = NULL, *context4 = NULL; sa_family_t family = AF_UNSPEC; unsigned int duration_in_ms, packet_size, rate_in_kbps; struct sockaddr_in6 ipv6; struct sockaddr_in ipv4; 
uint16_t port; bool is_udp; int start = 0; if (!strcmp(argv[0], "zperf")) { start++; argc--; } is_udp = !strcmp(argv[start], CMD_STR_UDP_UPLOAD) ? 1 : 0; if (argc == 1) { if (is_udp) { #if defined(CONFIG_NET_UDP) shell_udp_upload_usage(); #endif } else { #if defined(CONFIG_NET_TCP) shell_tcp_upload_usage(); #endif } return -1; } #if defined(CONFIG_NET_IPV6) && !defined(CONFIG_NET_IPV4) if (zperf_get_ipv6_addr(argv[start + 1], argv[start + 2], &ipv6.sin6_addr, argv[start]) < 0) { printk("[%s] ERROR! Please specify the IP address of the " "remote server\n", argv[start]); return -1; } printk("[%s] Connecting to %s\n", argv[start], net_sprint_ipv6_addr(&ipv6.sin6_addr)); family = AF_INET6; #endif #if defined(CONFIG_NET_IPV4) && !defined(CONFIG_NET_IPV6) if (zperf_get_ipv4_addr(argv[start + 1], &ipv4.sin_addr, argv[start]) < 0) { printk("[%s] ERROR! Please specify the IP address of the " "remote server\n", argv[start]); return -1; } printk("[%s] Connecting to %s\n", argv[start], net_sprint_ipv4_addr(&ipv4.sin_addr)); family = AF_INET; #endif #if defined(CONFIG_NET_IPV6) && defined(CONFIG_NET_IPV4) if (zperf_get_ipv6_addr(argv[start + 1], argv[start + 2], &ipv6.sin6_addr, argv[start]) < 0) { if (zperf_get_ipv4_addr(argv[start + 1], &ipv4.sin_addr, argv[start]) < 0) { printk("[%s] ERROR! 
Please specify the IP address " "of the remote server\n", argv[start]); return -1; } printk("[%s] Connecting to %s\n", argv[start], net_sprint_ipv4_addr(&ipv4.sin_addr)); family = AF_INET; } else { printk("[%s] Connecting to %s\n", argv[start], net_sprint_ipv6_addr(&ipv6.sin6_addr)); family = AF_INET6; } #endif if (argc > 2) { port = strtoul(argv[start + 2], NULL, 10); printk("[%s] Remote port is %u\n", argv[start], port); } else { port = DEF_PORT; } if (setup_contexts(&context6, &context4, family, &in6_addr_my, &in4_addr_my, port, is_udp, argv[start]) < 0) { return -1; } if (argc > 3) { duration_in_ms = strtoul(argv[start + 3], NULL, 10) * MSEC_PER_SEC; } else { duration_in_ms = 1000; } if (argc > 4) { packet_size = parse_number(argv[start + 4], K, K_UNIT); } else { packet_size = 256; } if (argc > 5) { rate_in_kbps = (parse_number(argv[start + 5], K, K_UNIT) + 1023) / 1024; } else { rate_in_kbps = 10; } return execute_upload(context6, context4, family, &ipv6, &ipv4, is_udp, argv[start], duration_in_ms, packet_size, rate_in_kbps); } static int shell_cmd_upload2(int argc, char *argv[]) { struct net_context *context6 = NULL, *context4 = NULL; uint16_t port = DEF_PORT; unsigned int duration_in_ms, packet_size, rate_in_kbps; sa_family_t family; uint8_t is_udp; int start = 0; if (!strcmp(argv[0], "zperf")) { start++; argc--; } is_udp = !strcmp(argv[start], CMD_STR_UDP_UPLOAD2) ? 1 : 0; if (argc == 1) { if (is_udp) { #if defined(CONFIG_NET_UDP) shell_udp_upload2_usage(); #endif } else { #if defined(CONFIG_NET_TCP) shell_tcp_upload2_usage(); #endif } return -1; } family = !strcmp(argv[start + 1], "v4") ? 
AF_INET : AF_INET6; #if defined(CONFIG_NET_IPV6) in6_addr_my.sin6_port = htons(port); #endif #if defined(CONFIG_NET_IPV4) in4_addr_my.sin_port = htons(port); #endif if (family == AF_INET6) { if (net_is_ipv6_addr_unspecified(&in6_addr_my.sin6_addr)) { printk("[%s] Invalid local IPv6 address\n", argv[start]); return -1; } if (net_is_ipv6_addr_unspecified(&in6_addr_dst.sin6_addr)) { printk("[%s] Invalid destination IPv6 address\n", argv[start]); return -1; } printk("[%s] Connecting to %s\n", argv[start], net_sprint_ipv6_addr(&in6_addr_dst.sin6_addr)); } else { if (net_is_ipv4_addr_unspecified(&in4_addr_my.sin_addr)) { printk("[%s] Invalid local IPv4 address\n", argv[start]); return -1; } if (net_is_ipv4_addr_unspecified(&in4_addr_dst.sin_addr)) { printk("[%s] Invalid destination IPv4 address\n", argv[start]); return -1; } printk("[%s] Connecting to %s\n", argv[start], net_sprint_ipv4_addr(&in4_addr_dst.sin_addr)); } if (setup_contexts(&context6, &context4, family, &in6_addr_my, &in4_addr_my, port, is_udp, argv[start]) < 0) { return -1; } if (argc > 1) { duration_in_ms = strtoul(argv[start + 2], NULL, 10) * MSEC_PER_SEC; } else { duration_in_ms = 1000; } if (argc > 2) { packet_size = parse_number(argv[start + 3], K, K_UNIT); } else { packet_size = 256; } if (argc > 3) { rate_in_kbps = (parse_number(argv[start + 4], K, K_UNIT) + 1023) / 1024; } else { rate_in_kbps = 10; } return execute_upload(context6, context4, family, &in6_addr_dst, &in4_addr_dst, is_udp, argv[start], duration_in_ms, packet_size, rate_in_kbps); } static int shell_cmd_connectap(int argc, char *argv[]) { printk("[%s] Zephyr has not been built with Wi-Fi support.\n", CMD_STR_CONNECTAP); return 0; } #if defined(CONFIG_NET_TCP) static int shell_cmd_tcp_download(int argc, char *argv[]) { static bool tcp_stopped = true; int port; if (argc == 1) { /* Print usage */ printk("\n[%s]:\n", CMD_STR_TCP_DOWNLOAD); printk("Usage:\t%s <port>\n", CMD_STR_TCP_DOWNLOAD); printk("\nExample %s 5001\n", 
CMD_STR_TCP_DOWNLOAD); return -1; } if (argc > 1) { port = strtoul(argv[1], NULL, 10); } else { port = DEF_PORT; } if (!tcp_stopped) { printk("[%s] ERROR! TCP server already started!\n", CMD_STR_TCP_DOWNLOAD); return -1; } zperf_tcp_receiver_init(port); tcp_stopped = false; printk("[%s] TCP server started on port %u\n", CMD_STR_TCP_DOWNLOAD, port); return 0; } #endif static int shell_cmd_version(int argc, char *argv[]) { printk("\nzperf [%s]: %s config: %s\n", CMD_STR_VERSION, VERSION, CONFIG); return 0; } static void zperf_init(void) { #if defined(MY_IP6ADDR) || defined(MY_IP4ADDR) int ret; printk("\n"); #endif #if defined(CONFIG_NET_IPV6) && defined(MY_IP6ADDR) if (zperf_get_ipv6_addr(MY_IP6ADDR, MY_PREFIX_LEN_STR, &ipv6, __func__) < 0) { printk("[%s] ERROR! Unable to set IP\n", __func__); } else { printk("[%s] Setting IP address %s\n", __func__, net_sprint_ipv6_addr(&ipv6)); net_ipaddr_copy(&in6_addr_my.sin6_addr, &ipv6); } ret = net_addr_pton(AF_INET6, DST_IP6ADDR, (struct sockaddr *)&in6_addr_dst.sin6_addr); if (ret < 0) { printk("[%s] ERROR! Unable to set IP %s\n", __func__, DST_IP6ADDR); } else { printk("[%s] Setting destination IP address %s\n", __func__, net_sprint_ipv6_addr(&in6_addr_dst.sin6_addr)); } #endif #if defined(CONFIG_NET_IPV4) && defined(MY_IP4ADDR) if (zperf_get_ipv4_addr(MY_IP4ADDR, &ipv4, __func__) < 0) { printk("[%s] ERROR! Unable to set IP\n", __func__); } else { printk("[%s] Setting IP address %s\n", __func__, net_sprint_ipv4_addr(&ipv4)); net_ipaddr_copy(&in4_addr_my.sin_addr, &ipv4); } ret = net_addr_pton(AF_INET, DST_IP4ADDR, (struct sockaddr *)&in4_addr_dst.sin_addr); if (ret < 0) { printk("[%s] ERROR! 
Unable to set IP %s\n", __func__, DST_IP4ADDR); } else { printk("[%s] Setting destination IP address %s\n", __func__, net_sprint_ipv4_addr(&in4_addr_dst.sin_addr)); } #endif zperf_session_init(); } #define MY_SHELL_MODULE "zperf" struct shell_cmd commands[] = { { CMD_STR_SETIP, shell_cmd_setip }, { CMD_STR_CONNECTAP, shell_cmd_connectap }, { CMD_STR_VERSION, shell_cmd_version }, #if defined(CONFIG_NET_UDP) { CMD_STR_UDP_UPLOAD, shell_cmd_upload }, /* Same as upload command but no need to specify the addresses */ { CMD_STR_UDP_UPLOAD2, shell_cmd_upload2 }, { CMD_STR_UDP_DOWNLOAD, shell_cmd_udp_download }, #endif #if defined(CONFIG_NET_TCP) { CMD_STR_TCP_DOWNLOAD, shell_cmd_tcp_download }, #endif #if defined(PROFILER) PROF_CMD, #endif { NULL, NULL } }; void main(void) { shell_cmd_version(0, NULL); SHELL_REGISTER(MY_SHELL_MODULE, commands); shell_register_default_module(MY_SHELL_MODULE); zperf_init(); #if PROFILER while (1) { k_sleep(5 * MSEC_PER_SEC); prof_flush(); } #else k_sleep(K_FOREVER); #endif }
apache-2.0
mdoering/backbone
life/Fungi/Basidiomycota/Agaricomycetes/Agaricales/Tricholomataceae/Mycenella/Mycenella radicata/README.md
254
# Mycenella radicata (Thiers) Maas Geest. SPECIES

#### Status
ACCEPTED

#### According to
Index Fungorum

#### Published in
Proc. K. Ned. Akad. Wet., Ser. C, Biol. Med. Sci. 84(4): 508 (1983)

#### Original name
Mycena radicata Thiers

### Remarks
null
apache-2.0
wooga/airflow
airflow/providers/sftp/PROVIDERS_CHANGES_2020.05.20.md
2495
### Release 2020.5.20 | Commit | Committed | Subject | |:-----------------------------------------------------------------------------------------------|:------------|:---------------------------------------------------------------------------| | [00642a46d](https://github.com/apache/airflow/commit/00642a46d019870c4decb3d0e47c01d6a25cb88c) | 2020-05-26 | Fixed name of 20 remaining wrongly named operators. (#8994) | | [375d1ca22](https://github.com/apache/airflow/commit/375d1ca229464617780623c61c6e8a1bf570c87f) | 2020-05-19 | Release candidate 2 for backport packages 2020.05.20 (#8898) | | [12c5e5d8a](https://github.com/apache/airflow/commit/12c5e5d8ae25fa633efe63ccf4db389e2b796d79) | 2020-05-17 | Prepare release candidate for backport packages (#8891) | | [f3521fb0e](https://github.com/apache/airflow/commit/f3521fb0e36733d8bd356123e56a453fd37a6dca) | 2020-05-16 | Regenerate readme files for backport package release (#8886) | | [92585ca4c](https://github.com/apache/airflow/commit/92585ca4cb375ac879f4ab331b3a063106eb7b92) | 2020-05-15 | Added automated release notes generation for backport operators (#8807) | | [bac0ab27c](https://github.com/apache/airflow/commit/bac0ab27cfc89e715efddc97214fcd7738084361) | 2020-03-30 | close sftp connection without error (#7953) | | [42eef3821](https://github.com/apache/airflow/commit/42eef38217e709bc7a7f71bf0286e9e61293a43e) | 2020-03-07 | [AIRFLOW-6877] Add cross-provider dependencies as extras (#7506) | | [97a429f9d](https://github.com/apache/airflow/commit/97a429f9d0cf740c5698060ad55f11e93cb57b55) | 2020-02-02 | [AIRFLOW-6714] Remove magic comments about UTF-8 (#7338) | | [ceea293c1](https://github.com/apache/airflow/commit/ceea293c1652240e7e856c201e4341a87ef97a0f) | 2020-01-28 | [AIRFLOW-6656] Fix AIP-21 moving (#7272) | | [9a04013b0](https://github.com/apache/airflow/commit/9a04013b0e40b0d744ff4ac9f008491806d60df2) | 2020-01-27 | [AIRFLOW-6646][AIP-21] Move protocols classes to providers package (#7268) | | 
[69629a5a9](https://github.com/apache/airflow/commit/69629a5a948ab2c4ac04a4a4dca6ac86d19c11bd) | 2019-12-09 | [AIRFLOW-5807] Move SFTP from contrib to providers. (#6464) |
apache-2.0
LQJJ/demo
111-meiju/meiju/AdViewSDK-3.5.2/AdNetworks/AdChina/lib/AdChinaSShareNetworkAdapter.h
423
//
//  AdChinaSShareNetworkAdapter.h
//  AdChinaSShareKit
//
//  Created by Daxiong on 13-11-18.
//  Copyright (c) 2013 Daxiong. All rights reserved.
//

#import "AdChinaSShareAdapter.h"

// Network-backed adapter in the AdChina SShare kit: extends AdChinaSShareAdapter
// with a helper that issues a request over NSURLConnection.
@interface AdChinaSShareNetworkAdapter : AdChinaSShareAdapter

/**
 * Start a request and return the NSURLConnection driving it.
 *
 * @param type      request type identifier (semantics defined by the AdChina SDK)
 * @param url       endpoint URL to contact
 * @param paramDict request parameters sent with the request
 * @return the connection created for this request
 */
- (NSURLConnection *) urlConnectionWithType:(NSString *)type andUrl:(NSURL *)url andParam:(NSDictionary *)paramDict;

@end
apache-2.0
dexter-at-git/coursera-android-spring
assignments/assignment3/client/src/vandy/mooc/provider/VideoContract.java
2701
package vandy.mooc.provider;

import android.content.ContentUris;
import android.net.Uri;
import android.provider.BaseColumns;

/**
 * Defines table and column names for the Video database.
 * (The original javadoc referred to an "Acronym" database throughout; this was a
 * copy-paste from a sibling assignment — the contract below clearly describes
 * the {@code video_table}.)
 */
public final class VideoContract {
    /**
     * The "Content authority" is a name for the entire content provider,
     * similar to the relationship between a domain name and its website. A
     * convenient string to use for the content authority is the package name
     * for the app, which must be unique on the device.
     */
    public static final String CONTENT_AUTHORITY = "vandy.mooc.video";

    /**
     * Use CONTENT_AUTHORITY to create the base of all URI's that apps will use
     * to contact the content provider.
     */
    public static final Uri BASE_CONTENT_URI = Uri.parse("content://" + CONTENT_AUTHORITY);

    /**
     * Possible paths (appended to base content URI for possible URI's), e.g.,
     * content://vandy.mooc.video/video_table/ is a valid path for Video data.
     * However, content://vandy.mooc.video/givemeroot/ will fail since the
     * ContentProvider hasn't been given any information on what to do with
     * "givemeroot".
     */
    public static final String PATH_VIDEO = VideoEntry.TABLE_NAME;

    /**
     * Inner class that defines the contents of the Video table.
     */
    public static final class VideoEntry implements BaseColumns {
        /**
         * Use BASE_CONTENT_URI to create the unique URI for the Video Table
         * that apps will use to contact the content provider.
         */
        public static final Uri CONTENT_URI = BASE_CONTENT_URI.buildUpon()
                .appendPath(PATH_VIDEO).build();

        /**
         * MIME type when the Cursor returned for a given URI by the
         * ContentProvider contains 0..x items.
         */
        public static final String CONTENT_ITEMS_TYPE =
                "vnd.android.cursor.dir/" + CONTENT_AUTHORITY + "/" + PATH_VIDEO;

        /**
         * MIME type when the Cursor returned for a given URI by the
         * ContentProvider contains exactly 1 item.
         */
        public static final String CONTENT_ITEM_TYPE =
                "vnd.android.cursor.item/" + CONTENT_AUTHORITY + "/" + PATH_VIDEO;

        /**
         * Name of the database table.
         */
        public static final String TABLE_NAME = "video_table";

        /**
         * Columns storing the metadata of each Video.
         */
        public static final String COLUMN_TITLE = "title";
        public static final String COLUMN_DURATION = "duration";
        public static final String COLUMN_CONTENT_TYPE = "content_type";
        public static final String COLUMN_DATA_URL = "data_url";
        public static final String COLUMN_STAR_RATING = "star_rating";

        /**
         * Return a Uri that points to the row containing a given id.
         *
         * @param id row id to append to the content URI
         * @return Uri addressing that single row
         */
        public static Uri buildVideoUri(Long id) {
            return ContentUris.withAppendedId(CONTENT_URI, id);
        }
    }
}
apache-2.0
PetrGasparik/cas
cas-server-core-services/src/main/java/org/jasig/cas/authentication/principal/AbstractWebApplicationService.java
4645
package org.jasig.cas.authentication.principal;

import com.google.common.collect.ImmutableMap;
import org.apache.commons.lang3.builder.HashCodeBuilder;
import org.jasig.cas.logout.SingleLogoutService;
import org.jasig.cas.validation.ValidationResponseType;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.net.URLDecoder;
import java.util.Map;

/**
 * Abstract implementation of a WebApplicationService.
 *
 * <p>Holds the immutable service identity (id / original url / artifact id) plus
 * mutable per-validation state (principal, logout flag, response format).
 * Equality and hash code are based solely on the service id.</p>
 *
 * @author Scott Battaglia
 * @since 3.1
 */
public abstract class AbstractWebApplicationService implements SingleLogoutService {

    private static final long serialVersionUID = 610105280927740076L;

    // Shared empty attribute map returned by getAttributes(); immutable by construction.
    private static final Map<String, Object> EMPTY_MAP = ImmutableMap.of();

    /** Logger instance. **/
    protected final Logger logger = LoggerFactory.getLogger(this.getClass());

    /** The id of the service. */
    private final String id;

    /** The original url provided, used to reconstruct the redirect url. */
    private final String originalUrl;

    // Artifact (e.g. ticket) identifier associated with this service request, may be null.
    private final String artifactId;

    // Principal resolved for this service during validation; set via setPrincipal().
    private Principal principal;

    // Tracks whether single logout has already been performed for this service.
    private boolean loggedOutAlready;

    // Builds the Response (redirect/post) returned by getResponse().
    private final ResponseBuilder<WebApplicationService> responseBuilder;

    // Validation payload format; defaults to XML.
    private ValidationResponseType format = ValidationResponseType.XML;

    /**
     * Instantiates a new abstract web application service.
     *
     * @param id the id
     * @param originalUrl the original url
     * @param artifactId the artifact id
     * @param responseBuilder the response builder
     */
    protected AbstractWebApplicationService(final String id, final String originalUrl,
                                            final String artifactId,
                                            final ResponseBuilder<WebApplicationService> responseBuilder) {
        this.id = id;
        this.originalUrl = originalUrl;
        this.artifactId = artifactId;
        this.responseBuilder = responseBuilder;
    }

    @Override
    public final String toString() {
        // Intentionally just the id; used in logs throughout CAS.
        return this.id;
    }

    @Override
    public final String getId() {
        return this.id;
    }

    @Override
    public final String getArtifactId() {
        return this.artifactId;
    }

    @Override
    public final Map<String, Object> getAttributes() {
        // This base class carries no attributes; subclasses may override behavior elsewhere.
        return EMPTY_MAP;
    }

    /**
     * Return the original url provided (as {@code service} or {@code targetService} request parameter).
     * Used to reconstruct the redirect url.
     *
     * @return the original url provided.
     */
    @Override
    public final String getOriginalUrl() {
        return this.originalUrl;
    }

    @Override
    public boolean equals(final Object object) {
        if (object == null) {
            return false;
        }
        // Any Service with the same id is considered equal, regardless of concrete class.
        if (object instanceof Service) {
            final Service service = (Service) object;
            return getId().equals(service.getId());
        }
        return false;
    }

    @Override
    public int hashCode() {
        // Consistent with equals(): hash only the id.
        return new HashCodeBuilder()
                .append(this.id)
                .toHashCode();
    }

    public Principal getPrincipal() {
        return this.principal;
    }

    @Override
    public void setPrincipal(final Principal principal) {
        this.principal = principal;
    }

    @Override
    public boolean matches(final Service service) {
        // Compare the two service ids after URL-decoding, case-insensitively.
        // Any decoding failure is logged and treated as "no match".
        try {
            final String thisUrl = URLDecoder.decode(this.id, "UTF-8");
            final String serviceUrl = URLDecoder.decode(service.getId(), "UTF-8");

            logger.trace("Decoded urls and comparing [{}] with [{}]", thisUrl, serviceUrl);
            return thisUrl.equalsIgnoreCase(serviceUrl);
        } catch (final Exception e) {
            logger.error(e.getMessage(), e);
        }
        return false;
    }

    /**
     * Return if the service is already logged out.
     *
     * @return if the service is already logged out.
     */
    @Override
    public boolean isLoggedOutAlready() {
        return loggedOutAlready;
    }

    /**
     * Set if the service is already logged out.
     *
     * @param loggedOutAlready if the service is already logged out.
     */
    @Override
    public final void setLoggedOutAlready(final boolean loggedOutAlready) {
        this.loggedOutAlready = loggedOutAlready;
    }

    protected ResponseBuilder<? extends WebApplicationService> getResponseBuilder() {
        return responseBuilder;
    }

    @Override
    public ValidationResponseType getFormat() {
        return format;
    }

    public void setFormat(final ValidationResponseType format) {
        this.format = format;
    }

    @Override
    public Response getResponse(final String ticketId) {
        // Delegate response construction (redirect/post) to the configured builder.
        return this.responseBuilder.build(this, ticketId);
    }
}
apache-2.0
kennykwok1/PlaygroundOSS
Doc/LuaAPI/README.md
15
see index.html
apache-2.0
dreamsxin/101_browser
src/GuiOpenGL/GuiComponentsBasic.cpp
4410
/*
 * Copyright 2008-2011 Wolfgang Keller
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "GuiOpenGL/GuiComponentsBasic.h"

#ifdef _WIN32
#include <windows.h>
#endif
#include <GL/gl.h>
#include <cassert>

/*
 * Appends two vertices (inner corner + offset outer corner) to a triangle
 * strip, producing a straight mitered border corner at currVertex.
 * The unnamed size_t parameter (segment count in the round variant) is
 * unused here; it exists so both border functions share a signature.
 */
void createStraightBorder(Vertex2<float> prevVertex, Vertex2<float> currVertex,
	Vertex2<float> nextVertex,
	std::vector<Vertex2<float> >* pBorderTriangleStrip,
	float borderWidth, size_t)
{
	// Unit directions along the two edges meeting at currVertex.
	Vector2<float> prevToCurrVect = currVertex - prevVertex;
	Vector2<float> currToNextVect = nextVertex - currVertex;
	normalize(&prevToCurrVect);
	normalize(&currToNextVect);

	// Miter direction: bisector of the two edge directions; its normal points
	// across the border.
	Vector2<float> direction = prevToCurrVect+currToNextVect;
	normalize(&direction);
	Vector2<float> rightFromDirection = normal(direction);

	// Scale the miter so the border keeps constant width borderWidth.
	float scaleFactor = -borderWidth/(currToNextVect.x*rightFromDirection.x+
		currToNextVect.y*rightFromDirection.y);

	pBorderTriangleStrip->push_back(currVertex);
	pBorderTriangleStrip->push_back(currVertex+rightFromDirection*scaleFactor);
}

/*
 * Appends a rounded border corner at currVertex to the triangle strip,
 * approximated with curveSegmentsCount segments (so curveSegmentsCount+1
 * vertex pairs are emitted).
 */
void createRoundBorder(Vertex2<float> prevVertex, Vertex2<float> currVertex,
	Vertex2<float> nextVertex,
	std::vector<Vertex2<float> >* pBorderTriangleStrip,
	float borderWidth, size_t curveSegmentsCount)
{
	assert(curveSegmentsCount>=1);

	Vector2<float> prevToCurrVect = currVertex - prevVertex;
	Vector2<float> currToNextVect = nextVertex - currVertex;
	normalize(&prevToCurrVect);
	normalize(&currToNextVect);

	// Outward normals of the incoming and outgoing edges; the arc sweeps
	// from the first to the second.
	Vector2<float> prevToCurrNormal = normal(prevToCurrVect);
	Vector2<float> currToNextNormal = normal(currToNextVect);

	/*
	 * The orthogonal matrix that rotates (1, 0) to prevToCurrNormal is
	 *
	 * | prevToCurrNormal.x prevToCurrVect.x |
	 * | prevToCurrNormal.y prevToCurrVect.y |
	 *
	 * Since this is an orthogonal matrix the inverse one is this one transposed
	 */
	Matrix22<float> orth = Matrix22<float>(prevToCurrNormal.x, prevToCurrNormal.y,
		prevToCurrVect.x, prevToCurrVect.y).transpose();

	// Express the target normal in the frame of the starting normal to get
	// the signed sweep angle of the arc.
	Vector2<float> angleVector = orth * currToNextNormal;

	// The order has to be y, x -- see declaration of atan2f
	float angle = atan2f(angleVector.y, angleVector.x);

	for (size_t i=0; i<=curveSegmentsCount; i++)
	{
		// Interpolate the rotation from 0 to the full sweep angle.
		float currentAngle = i*angle/curveSegmentsCount;

		Matrix22<float> currentRotation = Matrix22<float>(
			cosf(currentAngle), sinf(currentAngle),
			-sinf(currentAngle), cosf(currentAngle));

		Vector2<float> movement = currentRotation*prevToCurrNormal*borderWidth;

		pBorderTriangleStrip->push_back(currVertex);
		pBorderTriangleStrip->push_back(currVertex+movement);
	}
}

/*!
 * Appends the four corners of an axis-aligned box as a triangle strip.
 * Coordinates are flipped vertically against currentHeight (y grows upward
 * in GL, while top/left are given in screen-style coordinates).
 *
 * vertices output:
 * [0]: bottom left
 * [1]: bottom right
 * [2]: top left
 * [3]: top right
 */
void createBoxVertices(std::vector<Vertex2<float> >* boxVertices,
	float left, float top, float width, float height,
	float currentHeight)
{
	/*
	 * 2-3
	 * |\|
	 * 0-1
	 */
	boxVertices->push_back(Vertex2<float>(left, currentHeight-top-height));       // bottom left
	boxVertices->push_back(Vertex2<float>(left+width, currentHeight-top-height)); // bottom right
	boxVertices->push_back(Vertex2<float>(left, currentHeight-top));              // top left
	boxVertices->push_back(Vertex2<float>(left+width, currentHeight-top));        // top right
}

/*
 * Draws a 4-vertex triangle strip with per-vertex colors.
 * NOTE(review): the vertex count is hardcoded to 4 (matching the output of
 * createBoxVertices), unlike the single-color overload below which uses
 * vertices->size() — confirm callers never pass larger strips here.
 */
void drawVertexArray(const std::vector<Vertex2<float> >* vertices, Color4<float> colors[4])
{
	glEnableClientState(GL_VERTEX_ARRAY);
	glEnableClientState(GL_COLOR_ARRAY);

	glVertexPointer(2, GL_FLOAT, 0, &vertices->at(0));
	glColorPointer(4, GL_FLOAT, 0, colors);

	glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);

	glDisableClientState(GL_COLOR_ARRAY);
	glDisableClientState(GL_VERTEX_ARRAY);
}

/*
 * Draws the whole triangle strip in a single flat color.
 */
void drawVertexArray(const std::vector<Vertex2<float> >* vertices, Color4<float> color)
{
	glEnableClientState(GL_VERTEX_ARRAY);
	// Color4 is assumed to lay out r,g,b,a contiguously starting at r —
	// glColor4fv reads 4 floats from &color.r.
	glColor4fv(&color.r);
	glVertexPointer(2, GL_FLOAT, 0, &vertices->at(0));
	glDrawArrays(GL_TRIANGLE_STRIP, 0, (GLsizei) vertices->size());
	glDisableClientState(GL_VERTEX_ARRAY);
}
apache-2.0
redisson/redisson
redisson/src/main/java/org/redisson/RedissonPatternTopic.java
5917
/**
 * Copyright (c) 2013-2021 Nikita Koksharov
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.redisson;

import org.redisson.api.RFuture;
import org.redisson.api.RPatternTopic;
import org.redisson.api.listener.PatternMessageListener;
import org.redisson.api.listener.PatternStatusListener;
import org.redisson.client.ChannelName;
import org.redisson.client.RedisPubSubListener;
import org.redisson.client.RedisTimeoutException;
import org.redisson.client.codec.Codec;
import org.redisson.client.protocol.pubsub.PubSubType;
import org.redisson.command.CommandAsyncExecutor;
import org.redisson.config.MasterSlaveServersConfig;
import org.redisson.misc.CompletableFutureWrapper;
import org.redisson.pubsub.AsyncSemaphore;
import org.redisson.pubsub.PubSubConnectionEntry;
import org.redisson.pubsub.PublishSubscribeService;

import java.util.Collection;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.CompletableFuture;

/**
 * Distributed topic implementation. Messages are delivered to all message listeners across Redis cluster.
 *
 * <p>Pattern (PSUBSCRIBE) variant of the topic: the {@code name} is a channel
 * pattern. Listener ids returned by the add methods are
 * {@link System#identityHashCode} values of the internal wrapper listeners.</p>
 *
 * @author Nikita Koksharov
 *
 */
public class RedissonPatternTopic implements RPatternTopic {

    // Shared pub/sub machinery obtained from the connection manager.
    final PublishSubscribeService subscribeService;
    final CommandAsyncExecutor commandExecutor;
    // Raw pattern name and its ChannelName wrapper (kept together to avoid re-wrapping).
    private final String name;
    private final ChannelName channelName;
    private final Codec codec;

    protected RedissonPatternTopic(CommandAsyncExecutor commandExecutor, String name) {
        // Default to the connection manager's configured codec.
        this(commandExecutor.getConnectionManager().getCodec(), commandExecutor, name);
    }

    protected RedissonPatternTopic(Codec codec, CommandAsyncExecutor commandExecutor, String name) {
        this.commandExecutor = commandExecutor;
        this.name = name;
        this.channelName = new ChannelName(name);
        this.codec = codec;
        this.subscribeService = commandExecutor.getConnectionManager().getSubscribeService();
    }

    @Override
    public int addListener(PatternStatusListener listener) {
        // Wrap the status listener so it also carries the pattern name.
        return addListener(new PubSubPatternStatusListener(listener, name));
    };

    @Override
    public <T> int addListener(Class<T> type, PatternMessageListener<T> listener) {
        PubSubPatternMessageListener<T> pubSubListener = new PubSubPatternMessageListener<T>(type, listener, name);
        return addListener(pubSubListener);
    }

    // Blocking subscribe; the returned id identifies the wrapper listener.
    private int addListener(RedisPubSubListener<?> pubSubListener) {
        CompletableFuture<Collection<PubSubConnectionEntry>> future =
                subscribeService.psubscribe(channelName, codec, pubSubListener);
        // Block until the PSUBSCRIBE completes (propagates failures).
        commandExecutor.get(future);
        return System.identityHashCode(pubSubListener);
    }

    @Override
    public RFuture<Integer> addListenerAsync(PatternStatusListener listener) {
        PubSubPatternStatusListener pubSubListener = new PubSubPatternStatusListener(listener, name);
        return addListenerAsync(pubSubListener);
    }

    @Override
    public <T> RFuture<Integer> addListenerAsync(Class<T> type, PatternMessageListener<T> listener) {
        PubSubPatternMessageListener<T> pubSubListener = new PubSubPatternMessageListener<T>(type, listener, name);
        return addListenerAsync(pubSubListener);
    }

    // Non-blocking subscribe; resolves to the same identity-hash listener id.
    private RFuture<Integer> addListenerAsync(RedisPubSubListener<?> pubSubListener) {
        CompletableFuture<Collection<PubSubConnectionEntry>> future =
                subscribeService.psubscribe(channelName, codec, pubSubListener);
        CompletableFuture<Integer> f = future.thenApply(res -> {
            return System.identityHashCode(pubSubListener);
        });
        return new CompletableFutureWrapper<>(f);
    }

    // Acquire the per-channel semaphore, bounding the wait by the configured
    // command timeout plus all retry intervals; fails loudly on timeout.
    protected void acquire(AsyncSemaphore semaphore) {
        MasterSlaveServersConfig config = commandExecutor.getConnectionManager().getConfig();
        int timeout = config.getTimeout() + config.getRetryInterval() * config.getRetryAttempts();
        if (!semaphore.tryAcquire(timeout)) {
            throw new RedisTimeoutException("Remove listeners operation timeout: (" + timeout + "ms) for " + name + " topic");
        }
    }

    @Override
    public RFuture<Void> removeListenerAsync(int listenerId) {
        CompletableFuture<Void> f = subscribeService.removeListenerAsync(PubSubType.PUNSUBSCRIBE, channelName, listenerId);
        return new CompletableFutureWrapper<>(f);
    }

    @Override
    public void removeListener(int listenerId) {
        // Blocking form of removeListenerAsync.
        commandExecutor.get(removeListenerAsync(listenerId).toCompletableFuture());
    }

    @Override
    public void removeAllListeners() {
        // Serialize against concurrent (un)subscribes on this channel, then
        // PUNSUBSCRIBE only if listeners are actually registered.
        AsyncSemaphore semaphore = subscribeService.getSemaphore(channelName);
        acquire(semaphore);

        PubSubConnectionEntry entry = subscribeService.getPubSubEntry(channelName);
        if (entry == null) {
            // Nothing subscribed; release and bail out.
            semaphore.release();
            return;
        }

        if (entry.hasListeners(channelName)) {
            subscribeService.unsubscribe(PubSubType.PUNSUBSCRIBE, channelName).toCompletableFuture().join();
        }
        semaphore.release();
    }

    @Override
    public void removeListener(PatternMessageListener<?> listener) {
        CompletableFuture<Void> future = subscribeService.removeListenerAsync(PubSubType.PUNSUBSCRIBE, channelName, listener);
        commandExecutor.get(future);
    }

    @Override
    public List<String> getPatternNames() {
        // This implementation always represents exactly one pattern.
        return Collections.singletonList(name);
    }
}
apache-2.0
0Cristofer/telebot
main.py
6516
#Please read "useful links" before going on, they are necessary for better understanding
"""Telegram bot webhook app for Google App Engine (first-generation, Python 2).

NOTE(review): this module uses Python 2-only constructs (StringIO, urllib2,
`except X, err` syntax) and the legacy GAE SDK (ndb, webapp2, urlfetch); it
will not run under Python 3 without porting.
"""
import StringIO
import json     # decodes JSON payloads received from the Telegram API
import logging  # writes messages to the App Engine log
import random
import urllib
import urllib2

# for sending images
from PIL import Image
import multipart

# standard app engine imports
from google.appengine.api import urlfetch
from google.appengine.ext import ndb

import webapp2

TOKEN = 'YOUR_BOT_TOKEN_HERE'

BASE_URL = 'https://api.telegram.org/bot' + TOKEN + '/'

# ================================


class EnableStatus(ndb.Model):
    # NDB entity keyed by str(chat_id); tracks whether the bot replies in that chat.
    # key name: str(chat_id)
    enabled = ndb.BooleanProperty(indexed=False, default=False)  # per-chat on/off flag

# ================================


def setEnabled(chat_id, yes):
    # Persist the enabled flag for this chat.
    es = ndb.Key(EnableStatus, str(chat_id)).get()  # fetch the existing entity, if any
    if es:  # entity exists: update it
        es.enabled = yes
        es.put()
        return
    # NOTE(review): for a brand-new chat the entity is created WITHOUT applying
    # `yes`, so it keeps the default enabled=False even when yes=True — looks
    # like a bug; confirm against /start behavior.
    es = EnableStatus(id = str(chat_id))  # otherwise create a new entity
    es.put()


def getEnabled(chat_id):
    # Return the persisted enabled flag; creates a disabled entity on first sight.
    es = ndb.Key(EnableStatus, str(chat_id)).get()
    if es:
        return es.enabled  # return the actual stored state
    es = EnableStatus(id = str(chat_id))
    es.put()
    return False

# ================================ This part makes the communication google-telegram


class MeHandler(webapp2.RequestHandler):
    # GET /me — proxies Telegram's getMe for debugging.
    def get(self):
        urlfetch.set_default_fetch_deadline(60)
        self.response.write(json.dumps(json.load(urllib2.urlopen(BASE_URL + 'getMe'))))


class GetUpdatesHandler(webapp2.RequestHandler):
    # GET /updates — proxies Telegram's getUpdates (long-polling debug aid).
    def get(self):
        urlfetch.set_default_fetch_deadline(60)
        self.response.write(json.dumps(json.load(urllib2.urlopen(BASE_URL + 'getUpdates'))))


class SetWebhookHandler(webapp2.RequestHandler):
    # GET /set_webhook?url=... — registers this app's webhook URL with Telegram.
    def get(self):
        urlfetch.set_default_fetch_deadline(60)
        url = self.request.get('url')
        if url:
            self.response.write(json.dumps(json.load(urllib2.urlopen(BASE_URL + 'setWebhook', urllib.urlencode({'url': url})))))


class WebhookHandler(webapp2.RequestHandler):
    # POST /webhook — entry point Telegram calls for every incoming update.
    def post(self):
        urlfetch.set_default_fetch_deadline(60)
        body = json.loads(self.request.body)
        logging.info('request body:')
        logging.info(body)
        self.response.write(json.dumps(body))

        # From here you can take message information; only chat_id and text are
        # used below, but the update carries more fields (see Telegram Bot API).
        update_id = body['update_id']
        message = body['message']
        message_id = message.get('message_id')
        date = message.get('date')
        text = message.get('text')  # the message text, if any
        fr = message.get('from')
        chat = message['chat']
        chat_id = chat['id']  # chat identifier

        if not text:
            logging.info('no text')
            return

        def reply(msg=None, img=None):
            # Send a reply to the current message: either a text string or a
            # JPEG image given as raw bytes (exactly one of msg/img).
            if msg:
                resp = urllib2.urlopen(BASE_URL + 'sendMessage', urllib.urlencode({
                    'chat_id': str(chat_id),
                    'text': msg.encode('utf-8'),
                    'disable_web_page_preview': 'true',
                    'reply_to_message_id': str(message_id),
                })).read()
            elif img:
                resp = multipart.post_multipart(BASE_URL + 'sendPhoto', [
                    ('chat_id', str(chat_id)),
                    ('reply_to_message_id', str(message_id)),
                ], [
                    ('photo', 'image.jpg', img),
                ])
            else:
                logging.error('no msg or img specified')  # neither argument given; log and skip
                resp = None
            logging.info('send response:')
            logging.info(resp)

        # From here you can make custom commands, just add an 'elif'
        if text.startswith('/'):
            if text == '/start':
                reply('Bot enabled')
                setEnabled(chat_id, True)  # persist enabled=True (see setEnabled note)
            elif text == '/stop':
                reply('Bot disabled')
                setEnabled(chat_id, False)  # persist enabled=False
            elif text == '/image':  # generate and send a random gradient image
                img = Image.new('RGB', (512, 512))  # image size
                base = random.randint(0, 16777216)
                pixels = [base+i*j for i in range(512) for j in range(512)]  # generate sample image
                img.putdata(pixels)
                output = StringIO.StringIO()
                img.save(output, 'JPEG')
                reply(img=output.getvalue())
                """If you want to send a different image use this piece of code:
                img = Image.open("image.jpg")
                output = StringIO.StringIO()
                img.save(output, 'JPEG')
                reply(img=output.getvalue())"""
            else:
                reply('What command?')

        # If it is not a command (does not start with /)
        elif 'who are you' in text:
            reply('telebot starter kit, created by yukuku: https://github.com/yukuku/telebot')
        elif 'what time' in text:
            reply('look at the top-right corner of your screen!')
        else:
            if getEnabled(chat_id):  # only chat back when the bot is enabled for this chat
                try:
                    # Forward the text to the SimSimi chat API and relay its answer.
                    resp1 = json.load(urllib2.urlopen('http://www.simsimi.com/requestChat?lc=en&ft=1.0&req=' + urllib.quote_plus(text.encode('utf-8'))))
                    back = resp1.get('res')
                except urllib2.HTTPError, err:
                    logging.error(err)
                    back = str(err)
                if not back:
                    reply('okay...')
                elif 'I HAVE NO RESPONSE' in back:
                    reply('you said something with no meaning')
                else:
                    reply(back)
            else:
                logging.info('not enabled for chat_id {}'.format(chat_id))


# Telegram communication — route table (do not change the paths; Telegram is
# pointed at /webhook via /set_webhook).
app = webapp2.WSGIApplication([
    ('/me', MeHandler),
    ('/updates', GetUpdatesHandler),
    ('/set_webhook', SetWebhookHandler),
    ('/webhook', WebhookHandler),
], debug=True)
apache-2.0
android/android-test
runner/android_junit_runner/java/androidx/test/internal/runner/junit3/DelegatingTestSuite.java
2279
/*
 * Copyright (C) 2014 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package androidx.test.internal.runner.junit3;

import java.util.Enumeration;
import junit.framework.Test;
import junit.framework.TestResult;
import junit.framework.TestSuite;
import org.junit.Ignore;

/**
 * A {@link TestSuite} that delegates all calls to another {@link TestSuite}.
 *
 * <p>The delegate is swappable at runtime via {@link #setDelegateSuite}; every
 * {@code TestSuite} operation below forwards to the current delegate. Marked
 * {@code @Ignore} so JUnit4 discovery does not treat this wrapper itself as a
 * test.</p>
 */
@Ignore
class DelegatingTestSuite extends TestSuite {

  // The suite all calls are forwarded to; mutable by design.
  private TestSuite wrappedSuite;

  public DelegatingTestSuite(TestSuite suiteDelegate) {
    super();
    wrappedSuite = suiteDelegate;
  }

  /** Return the suite to delegate to */
  public TestSuite getDelegateSuite() {
    return wrappedSuite;
  }

  /**
   * Replace the suite to delegate to
   *
   * @param newSuiteDelegate the new delegate; subsequent calls forward to it
   */
  public void setDelegateSuite(TestSuite newSuiteDelegate) {
    wrappedSuite = newSuiteDelegate;
  }

  @Override
  public void addTest(Test test) {
    wrappedSuite.addTest(test);
  }

  @Override
  public int countTestCases() {
    return wrappedSuite.countTestCases();
  }

  @Override
  public String getName() {
    return wrappedSuite.getName();
  }

  @Override
  public void runTest(Test test, TestResult result) {
    wrappedSuite.runTest(test, result);
  }

  @Override
  public void setName(String name) {
    wrappedSuite.setName(name);
  }

  @Override
  public Test testAt(int index) {
    return wrappedSuite.testAt(index);
  }

  @Override
  public int testCount() {
    return wrappedSuite.testCount();
  }

  @Override
  public Enumeration<Test> tests() {
    return wrappedSuite.tests();
  }

  @Override
  public String toString() {
    return wrappedSuite.toString();
  }

  @Override
  public void run(TestResult result) {
    wrappedSuite.run(result);
  }
}
apache-2.0
aws/aws-sdk-cpp
aws-cpp-sdk-cloudformation/include/aws/cloudformation/model/DescribeStackResourcesRequest.h
15332
/** * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. * SPDX-License-Identifier: Apache-2.0. */ #pragma once #include <aws/cloudformation/CloudFormation_EXPORTS.h> #include <aws/cloudformation/CloudFormationRequest.h> #include <aws/core/utils/memory/stl/AWSString.h> #include <utility> namespace Aws { namespace CloudFormation { namespace Model { /** * <p>The input for <a>DescribeStackResources</a> action.</p><p><h3>See Also:</h3> * <a * href="http://docs.aws.amazon.com/goto/WebAPI/cloudformation-2010-05-15/DescribeStackResourcesInput">AWS * API Reference</a></p> */ class AWS_CLOUDFORMATION_API DescribeStackResourcesRequest : public CloudFormationRequest { public: DescribeStackResourcesRequest(); // Service request name is the Operation name which will send this request out, // each operation should has unique request name, so that we can get operation's name from this request. // Note: this is not true for response, multiple operations may have the same response name, // so we can not get operation's name from response. inline virtual const char* GetServiceRequestName() const override { return "DescribeStackResources"; } Aws::String SerializePayload() const override; protected: void DumpBodyToUrl(Aws::Http::URI& uri ) const override; public: /** * <p>The name or the unique stack ID that is associated with the stack, which * aren't always interchangeable:</p> <ul> <li> <p>Running stacks: You can specify * either the stack's name or its unique stack ID.</p> </li> <li> <p>Deleted * stacks: You must specify the unique stack ID.</p> </li> </ul> <p>Default: There * is no default value.</p> <p>Required: Conditional. 
If you don't specify * <code>StackName</code>, you must specify <code>PhysicalResourceId</code>.</p> */ inline const Aws::String& GetStackName() const{ return m_stackName; } /** * <p>The name or the unique stack ID that is associated with the stack, which * aren't always interchangeable:</p> <ul> <li> <p>Running stacks: You can specify * either the stack's name or its unique stack ID.</p> </li> <li> <p>Deleted * stacks: You must specify the unique stack ID.</p> </li> </ul> <p>Default: There * is no default value.</p> <p>Required: Conditional. If you don't specify * <code>StackName</code>, you must specify <code>PhysicalResourceId</code>.</p> */ inline bool StackNameHasBeenSet() const { return m_stackNameHasBeenSet; } /** * <p>The name or the unique stack ID that is associated with the stack, which * aren't always interchangeable:</p> <ul> <li> <p>Running stacks: You can specify * either the stack's name or its unique stack ID.</p> </li> <li> <p>Deleted * stacks: You must specify the unique stack ID.</p> </li> </ul> <p>Default: There * is no default value.</p> <p>Required: Conditional. If you don't specify * <code>StackName</code>, you must specify <code>PhysicalResourceId</code>.</p> */ inline void SetStackName(const Aws::String& value) { m_stackNameHasBeenSet = true; m_stackName = value; } /** * <p>The name or the unique stack ID that is associated with the stack, which * aren't always interchangeable:</p> <ul> <li> <p>Running stacks: You can specify * either the stack's name or its unique stack ID.</p> </li> <li> <p>Deleted * stacks: You must specify the unique stack ID.</p> </li> </ul> <p>Default: There * is no default value.</p> <p>Required: Conditional. 
If you don't specify * <code>StackName</code>, you must specify <code>PhysicalResourceId</code>.</p> */ inline void SetStackName(Aws::String&& value) { m_stackNameHasBeenSet = true; m_stackName = std::move(value); } /** * <p>The name or the unique stack ID that is associated with the stack, which * aren't always interchangeable:</p> <ul> <li> <p>Running stacks: You can specify * either the stack's name or its unique stack ID.</p> </li> <li> <p>Deleted * stacks: You must specify the unique stack ID.</p> </li> </ul> <p>Default: There * is no default value.</p> <p>Required: Conditional. If you don't specify * <code>StackName</code>, you must specify <code>PhysicalResourceId</code>.</p> */ inline void SetStackName(const char* value) { m_stackNameHasBeenSet = true; m_stackName.assign(value); } /** * <p>The name or the unique stack ID that is associated with the stack, which * aren't always interchangeable:</p> <ul> <li> <p>Running stacks: You can specify * either the stack's name or its unique stack ID.</p> </li> <li> <p>Deleted * stacks: You must specify the unique stack ID.</p> </li> </ul> <p>Default: There * is no default value.</p> <p>Required: Conditional. If you don't specify * <code>StackName</code>, you must specify <code>PhysicalResourceId</code>.</p> */ inline DescribeStackResourcesRequest& WithStackName(const Aws::String& value) { SetStackName(value); return *this;} /** * <p>The name or the unique stack ID that is associated with the stack, which * aren't always interchangeable:</p> <ul> <li> <p>Running stacks: You can specify * either the stack's name or its unique stack ID.</p> </li> <li> <p>Deleted * stacks: You must specify the unique stack ID.</p> </li> </ul> <p>Default: There * is no default value.</p> <p>Required: Conditional. 
If you don't specify * <code>StackName</code>, you must specify <code>PhysicalResourceId</code>.</p> */ inline DescribeStackResourcesRequest& WithStackName(Aws::String&& value) { SetStackName(std::move(value)); return *this;} /** * <p>The name or the unique stack ID that is associated with the stack, which * aren't always interchangeable:</p> <ul> <li> <p>Running stacks: You can specify * either the stack's name or its unique stack ID.</p> </li> <li> <p>Deleted * stacks: You must specify the unique stack ID.</p> </li> </ul> <p>Default: There * is no default value.</p> <p>Required: Conditional. If you don't specify * <code>StackName</code>, you must specify <code>PhysicalResourceId</code>.</p> */ inline DescribeStackResourcesRequest& WithStackName(const char* value) { SetStackName(value); return *this;} /** * <p>The logical name of the resource as specified in the template.</p> * <p>Default: There is no default value.</p> */ inline const Aws::String& GetLogicalResourceId() const{ return m_logicalResourceId; } /** * <p>The logical name of the resource as specified in the template.</p> * <p>Default: There is no default value.</p> */ inline bool LogicalResourceIdHasBeenSet() const { return m_logicalResourceIdHasBeenSet; } /** * <p>The logical name of the resource as specified in the template.</p> * <p>Default: There is no default value.</p> */ inline void SetLogicalResourceId(const Aws::String& value) { m_logicalResourceIdHasBeenSet = true; m_logicalResourceId = value; } /** * <p>The logical name of the resource as specified in the template.</p> * <p>Default: There is no default value.</p> */ inline void SetLogicalResourceId(Aws::String&& value) { m_logicalResourceIdHasBeenSet = true; m_logicalResourceId = std::move(value); } /** * <p>The logical name of the resource as specified in the template.</p> * <p>Default: There is no default value.</p> */ inline void SetLogicalResourceId(const char* value) { m_logicalResourceIdHasBeenSet = true; 
m_logicalResourceId.assign(value); } /** * <p>The logical name of the resource as specified in the template.</p> * <p>Default: There is no default value.</p> */ inline DescribeStackResourcesRequest& WithLogicalResourceId(const Aws::String& value) { SetLogicalResourceId(value); return *this;} /** * <p>The logical name of the resource as specified in the template.</p> * <p>Default: There is no default value.</p> */ inline DescribeStackResourcesRequest& WithLogicalResourceId(Aws::String&& value) { SetLogicalResourceId(std::move(value)); return *this;} /** * <p>The logical name of the resource as specified in the template.</p> * <p>Default: There is no default value.</p> */ inline DescribeStackResourcesRequest& WithLogicalResourceId(const char* value) { SetLogicalResourceId(value); return *this;} /** * <p>The name or unique identifier that corresponds to a physical instance ID of a * resource supported by CloudFormation.</p> <p>For example, for an Amazon Elastic * Compute Cloud (EC2) instance, <code>PhysicalResourceId</code> corresponds to the * <code>InstanceId</code>. You can pass the EC2 <code>InstanceId</code> to * <code>DescribeStackResources</code> to find which stack the instance belongs to * and what other resources are part of the stack.</p> <p>Required: Conditional. If * you don't specify <code>PhysicalResourceId</code>, you must specify * <code>StackName</code>.</p> <p>Default: There is no default value.</p> */ inline const Aws::String& GetPhysicalResourceId() const{ return m_physicalResourceId; } /** * <p>The name or unique identifier that corresponds to a physical instance ID of a * resource supported by CloudFormation.</p> <p>For example, for an Amazon Elastic * Compute Cloud (EC2) instance, <code>PhysicalResourceId</code> corresponds to the * <code>InstanceId</code>. 
You can pass the EC2 <code>InstanceId</code> to * <code>DescribeStackResources</code> to find which stack the instance belongs to * and what other resources are part of the stack.</p> <p>Required: Conditional. If * you don't specify <code>PhysicalResourceId</code>, you must specify * <code>StackName</code>.</p> <p>Default: There is no default value.</p> */ inline bool PhysicalResourceIdHasBeenSet() const { return m_physicalResourceIdHasBeenSet; } /** * <p>The name or unique identifier that corresponds to a physical instance ID of a * resource supported by CloudFormation.</p> <p>For example, for an Amazon Elastic * Compute Cloud (EC2) instance, <code>PhysicalResourceId</code> corresponds to the * <code>InstanceId</code>. You can pass the EC2 <code>InstanceId</code> to * <code>DescribeStackResources</code> to find which stack the instance belongs to * and what other resources are part of the stack.</p> <p>Required: Conditional. If * you don't specify <code>PhysicalResourceId</code>, you must specify * <code>StackName</code>.</p> <p>Default: There is no default value.</p> */ inline void SetPhysicalResourceId(const Aws::String& value) { m_physicalResourceIdHasBeenSet = true; m_physicalResourceId = value; } /** * <p>The name or unique identifier that corresponds to a physical instance ID of a * resource supported by CloudFormation.</p> <p>For example, for an Amazon Elastic * Compute Cloud (EC2) instance, <code>PhysicalResourceId</code> corresponds to the * <code>InstanceId</code>. You can pass the EC2 <code>InstanceId</code> to * <code>DescribeStackResources</code> to find which stack the instance belongs to * and what other resources are part of the stack.</p> <p>Required: Conditional. 
If * you don't specify <code>PhysicalResourceId</code>, you must specify * <code>StackName</code>.</p> <p>Default: There is no default value.</p> */ inline void SetPhysicalResourceId(Aws::String&& value) { m_physicalResourceIdHasBeenSet = true; m_physicalResourceId = std::move(value); } /** * <p>The name or unique identifier that corresponds to a physical instance ID of a * resource supported by CloudFormation.</p> <p>For example, for an Amazon Elastic * Compute Cloud (EC2) instance, <code>PhysicalResourceId</code> corresponds to the * <code>InstanceId</code>. You can pass the EC2 <code>InstanceId</code> to * <code>DescribeStackResources</code> to find which stack the instance belongs to * and what other resources are part of the stack.</p> <p>Required: Conditional. If * you don't specify <code>PhysicalResourceId</code>, you must specify * <code>StackName</code>.</p> <p>Default: There is no default value.</p> */ inline void SetPhysicalResourceId(const char* value) { m_physicalResourceIdHasBeenSet = true; m_physicalResourceId.assign(value); } /** * <p>The name or unique identifier that corresponds to a physical instance ID of a * resource supported by CloudFormation.</p> <p>For example, for an Amazon Elastic * Compute Cloud (EC2) instance, <code>PhysicalResourceId</code> corresponds to the * <code>InstanceId</code>. You can pass the EC2 <code>InstanceId</code> to * <code>DescribeStackResources</code> to find which stack the instance belongs to * and what other resources are part of the stack.</p> <p>Required: Conditional. 
If * you don't specify <code>PhysicalResourceId</code>, you must specify * <code>StackName</code>.</p> <p>Default: There is no default value.</p> */ inline DescribeStackResourcesRequest& WithPhysicalResourceId(const Aws::String& value) { SetPhysicalResourceId(value); return *this;} /** * <p>The name or unique identifier that corresponds to a physical instance ID of a * resource supported by CloudFormation.</p> <p>For example, for an Amazon Elastic * Compute Cloud (EC2) instance, <code>PhysicalResourceId</code> corresponds to the * <code>InstanceId</code>. You can pass the EC2 <code>InstanceId</code> to * <code>DescribeStackResources</code> to find which stack the instance belongs to * and what other resources are part of the stack.</p> <p>Required: Conditional. If * you don't specify <code>PhysicalResourceId</code>, you must specify * <code>StackName</code>.</p> <p>Default: There is no default value.</p> */ inline DescribeStackResourcesRequest& WithPhysicalResourceId(Aws::String&& value) { SetPhysicalResourceId(std::move(value)); return *this;} /** * <p>The name or unique identifier that corresponds to a physical instance ID of a * resource supported by CloudFormation.</p> <p>For example, for an Amazon Elastic * Compute Cloud (EC2) instance, <code>PhysicalResourceId</code> corresponds to the * <code>InstanceId</code>. You can pass the EC2 <code>InstanceId</code> to * <code>DescribeStackResources</code> to find which stack the instance belongs to * and what other resources are part of the stack.</p> <p>Required: Conditional. 
If * you don't specify <code>PhysicalResourceId</code>, you must specify * <code>StackName</code>.</p> <p>Default: There is no default value.</p> */ inline DescribeStackResourcesRequest& WithPhysicalResourceId(const char* value) { SetPhysicalResourceId(value); return *this;} private: Aws::String m_stackName; bool m_stackNameHasBeenSet; Aws::String m_logicalResourceId; bool m_logicalResourceIdHasBeenSet; Aws::String m_physicalResourceId; bool m_physicalResourceIdHasBeenSet; }; } // namespace Model } // namespace CloudFormation } // namespace Aws
apache-2.0
alexmilowski/python-hadoop-rest-api
pyox/apps/monitor/views.py
1129
import os
from datetime import datetime

from flask import Blueprint, render_template, Response, current_app, send_from_directory

from pyox import ServiceError
from pyox.apps.monitor.api import get_cluster_client

cluster_ui = Blueprint('cluster_ui', __name__, template_folder='templates')


@cluster_ui.route('/')
def index():
    """Render the cluster overview page.

    Fetches cluster info, scheduler state, and metrics from the cluster
    client and renders them with the ``cluster.html`` template.  On a
    service failure the error is relayed to the browser; a 401 additionally
    carries a WWW-Authenticate challenge so the browser prompts for
    credentials.
    """
    client = get_cluster_client()
    try:
        info = client.info()
        scheduler = client.scheduler()
        metrics = client.metrics()
        # 'startedOn' is reported in milliseconds since the epoch; convert
        # it to an ISO-8601 timestamp for display.
        info['startedOn'] = datetime.fromtimestamp(info['startedOn'] / 1e3).isoformat()
        return render_template('cluster.html', info=info, scheduler=scheduler, metrics=metrics)
    except ServiceError as err:
        return Response(
            status=err.status_code,
            response=err.message if err.status_code != 401 else 'Authentication Required',
            mimetype="text/plain",
            headers={'WWW-Authenticate': 'Basic realm="Login Required"'})


assets = Blueprint('assets_ui', __name__)


@assets.route('/assets/<path:path>')
def send_asset(path):
    """Serve a static asset file.

    Uses the ``ASSETS`` directory from the app configuration when set,
    otherwise falls back to the ``assets`` directory next to this module.
    ``send_from_directory`` guards against path traversal outside the
    chosen directory.
    """
    dir = current_app.config.get('ASSETS')
    if dir is None:
        # Fix: build the fallback path portably instead of slicing __file__
        # on a hard-coded '/', which breaks on Windows-style paths.
        dir = os.path.join(os.path.dirname(__file__), 'assets')
    return send_from_directory(dir, path)
apache-2.0
salamb/girder
tests/base.py
21800
#!/usr/bin/env python # -*- coding: utf-8 -*- ############################################################################### # Copyright 2013 Kitware Inc. # # Licensed under the Apache License, Version 2.0 ( the "License" ); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. ############################################################################### import base64 import codecs import cherrypy import io import json import logging import os import shutil import signal import six import sys import unittest import uuid from six import BytesIO from six.moves import urllib from girder.utility import model_importer from girder.utility.server import setup as setupServer from girder.constants import AccessType, ROOT_DIR, SettingKey from girder.models import getDbConnection from . import mock_smtp from . import mock_s3 from . import mongo_replicaset local = cherrypy.lib.httputil.Host('127.0.0.1', 30000) remote = cherrypy.lib.httputil.Host('127.0.0.1', 30001) mockSmtp = mock_smtp.MockSmtpReceiver() mockS3Server = None enabledPlugins = [] def startServer(mock=True, mockS3=False): """ Test cases that communicate with the server should call this function in their setUpModule() function. 
""" server = setupServer(test=True, plugins=enabledPlugins) if mock: cherrypy.server.unsubscribe() cherrypy.engine.start() # Make server quiet (won't announce start/stop or requests) cherrypy.config.update({'environment': 'embedded'}) # Log all requests if we asked to do so if 'cherrypy' in os.environ.get('EXTRADEBUG', '').split(): cherrypy.config.update({'log.screen': True}) logHandler = logging.StreamHandler(sys.stdout) logHandler.setLevel(logging.DEBUG) cherrypy.log.error_log.addHandler(logHandler) mockSmtp.start() if mockS3: global mockS3Server mockS3Server = mock_s3.startMockS3Server() return server def stopServer(): """ Test cases that communicate with the server should call this function in their tearDownModule() function. """ cherrypy.engine.exit() mockSmtp.stop() def dropTestDatabase(dropModels=True): """ Call this to clear all contents from the test database. Also forces models to reload. """ db_connection = getDbConnection() dbName = cherrypy.config['database']['uri'].split('/')[-1] if 'girder_test_' not in dbName: raise Exception('Expected a testing database name, but got %s' % dbName) db_connection.drop_database(dbName) if dropModels: model_importer.reinitializeAll() def dropGridFSDatabase(dbName): """ Clear all contents from a gridFS database used as an assetstore. :param dbName: the name of the database to drop. """ db_connection = getDbConnection() db_connection.drop_database(dbName) def dropFsAssetstore(path): """ Delete all of the files in a filesystem assetstore. This unlinks the path, which is potentially dangerous. :param path: the path to remove. """ if os.path.isdir(path): shutil.rmtree(path) class TestCase(unittest.TestCase, model_importer.ModelImporter): """ Test case base class for the application. Adds helpful utilities for database and HTTP communication. """ def setUp(self, assetstoreType=None, dropModels=True): """ We want to start with a clean database each time, so we drop the test database before each test. 
We then add an assetstore so the file model can be used without 500 errors. :param assetstoreType: if 'gridfs' or 's3', use that assetstore. For any other value, use a filesystem assetstore. """ self.assetstoreType = assetstoreType dropTestDatabase(dropModels=dropModels) assetstoreName = os.environ.get('GIRDER_TEST_ASSETSTORE', 'test') assetstorePath = os.path.join( ROOT_DIR, 'tests', 'assetstore', assetstoreName) if assetstoreType == 'gridfs': # Name this as '_auto' to prevent conflict with assetstores created # within test methods gridfsDbName = 'girder_test_%s_assetstore_auto' % assetstoreName dropGridFSDatabase(gridfsDbName) self.assetstore = self.model('assetstore'). \ createGridFsAssetstore(name='Test', db=gridfsDbName) elif assetstoreType == 'gridfsrs': gridfsDbName = 'girder_test_%s_rs_assetstore_auto' % assetstoreName mongo_replicaset.startMongoReplicaSet() self.assetstore = self.model('assetstore'). \ createGridFsAssetstore( name='Test', db=gridfsDbName, mongohost='mongodb://127.0.0.1:27070,127.0.0.1:27071,' '127.0.0.1:27072', replicaset='replicaset') elif assetstoreType == 's3': self.assetstore = self.model('assetstore'). \ createS3Assetstore(name='Test', bucket='bucketname', accessKeyId='test', secret='test', service=mockS3Server.service) else: dropFsAssetstore(assetstorePath) self.assetstore = self.model('assetstore'). \ createFilesystemAssetstore(name='Test', root=assetstorePath) addr = ':'.join(map(str, mockSmtp.address)) self.model('setting').set(SettingKey.SMTP_HOST, addr) self.model('setting').set(SettingKey.UPLOAD_MINIMUM_CHUNK_SIZE, 0) self.model('setting').set(SettingKey.PLUGINS_ENABLED, enabledPlugins) def tearDown(self): """ Stop any services that we started just for this test. 
""" # If "self.setUp" is overridden, "self.assetstoreType" may not be set if getattr(self, 'assetstoreType', None) == 'gridfsrs': mongo_replicaset.stopMongoReplicaSet() def assertStatusOk(self, response): """ Call this to assert that the response yielded a 200 OK output_status. :param response: The response object. """ self.assertStatus(response, 200) def assertStatus(self, response, code): """ Call this to assert that a given HTTP status code was returned. :param response: The response object. :param code: The status code. :type code: int or str """ code = str(code) if not response.output_status.startswith(code.encode()): msg = 'Response status was %s, not %s.' % (response.output_status, code) if hasattr(response, 'json'): msg += ' Response body was:\n%s' % json.dumps( response.json, sort_keys=True, indent=4, separators=(',', ': ')) self.fail(msg) def assertHasKeys(self, obj, keys): """ Assert that the given object has the given list of keys. :param obj: The dictionary object. :param keys: The keys it must contain. :type keys: list or tuple """ for k in keys: self.assertTrue(k in obj, 'Object does not contain key "%s"' % k) def assertRedirect(self, resp, url=None): """ Assert that we were given an HTTP redirect response, and optionally assert that you were redirected to a specific URL. :param resp: The response object. :param url: If you know the URL you expect to be redirected to, you should pass it here. :type url: str """ self.assertStatus(resp, 303) self.assertTrue('Location' in resp.headers) if url: self.assertEqual(url, resp.headers['Location']) def assertNotHasKeys(self, obj, keys): """ Assert that the given object does not have any of the given list of keys. :param obj: The dictionary object. :param keys: The keys it must not contain. 
:type keys: list or tuple """ for k in keys: self.assertFalse(k in obj, 'Object contains key "%s"' % k) def assertValidationError(self, response, field=None): """ Assert that a ValidationException was thrown with the given field. :param response: The response object. :param field: The field that threw the validation exception. :type field: str """ self.assertStatus(response, 400) self.assertEqual(response.json['type'], 'validation') self.assertEqual(response.json.get('field', None), field) def assertAccessDenied(self, response, level, modelName, user=None): if level == AccessType.READ: ls = 'Read' elif level == AccessType.WRITE: ls = 'Write' else: ls = 'Admin' if user is None: self.assertStatus(response, 401) else: self.assertStatus(response, 403) self.assertEqual('%s access denied for %s.' % (ls, modelName), response.json['message']) def assertMissingParameter(self, response, param): """ Assert that the response was a "parameter missing" error response. :param response: The response object. :param param: The name of the missing parameter. :type param: str """ self.assertEqual("Parameter '%s' is required." % param, response.json.get('message', '')) self.assertStatus(response, 400) def getSseMessages(self, resp): messages = self.getBody(resp).strip().split('\n\n') if not messages or messages == ['']: return () return [json.loads(m.replace('data: ', '')) for m in messages] def uploadFile(self, name, contents, user, parent, parentType='folder', mimeType=None): """ Upload a file. This is meant for small testing files, not very large files that should be sent in multiple chunks. :param name: The name of the file. :type name: str :param contents: The file contents :type contents: str :param user: The user performing the upload. :type user: dict :param parent: The parent document. :type parent: dict :param parentType: The type of the parent ("folder" or "item") :type parentType: str :param mimeType: Explicit MIME type to set on the file. 
:type mimeType: str :returns: The file that was created. :rtype: dict """ mimeType = mimeType or 'application/octet-stream' resp = self.request( path='/file', method='POST', user=user, params={ 'parentType': parentType, 'parentId': str(parent['_id']), 'name': name, 'size': len(contents), 'mimeType': mimeType }) self.assertStatusOk(resp) fields = [('offset', 0), ('uploadId', resp.json['_id'])] files = [('chunk', name, contents)] resp = self.multipartRequest( path='/file/chunk', user=user, fields=fields, files=files) self.assertStatusOk(resp) file = resp.json self.assertHasKeys(file, ['itemId']) self.assertEqual(file['name'], name) self.assertEqual(file['size'], len(contents)) self.assertEqual(file['mimeType'], mimeType) return self.model('file').load(file['_id'], force=True) def ensureRequiredParams(self, path='/', method='GET', required=(), user=None): """ Ensure that a set of parameters is required by the endpoint. :param path: The endpoint path to test. :param method: The HTTP method of the endpoint. :param required: The required parameter set. :type required: sequence of str """ for exclude in required: params = dict.fromkeys([p for p in required if p != exclude], '') resp = self.request(path=path, method=method, params=params, user=user) self.assertMissingParameter(resp, exclude) def _genToken(self, user): """ Helper method for creating an authentication token for the user. 
""" token = self.model('token').createToken(user) return str(token['_id']) def _buildHeaders(self, headers, cookie, user, token, basicAuth, authHeader): if cookie is not None: headers.append(('Cookie', cookie)) if user is not None: headers.append(('Girder-Token', self._genToken(user))) elif token is not None: if isinstance(token, dict): headers.append(('Girder-Token', token['_id'])) else: headers.append(('Girder-Token', token)) if basicAuth is not None: auth = base64.b64encode(basicAuth.encode('utf8')) headers.append((authHeader, 'Basic %s' % auth.decode())) def request(self, path='/', method='GET', params=None, user=None, prefix='/api/v1', isJson=True, basicAuth=None, body=None, type=None, exception=False, cookie=None, token=None, additionalHeaders=None, useHttps=False, authHeader='Girder-Authorization'): """ Make an HTTP request. :param path: The path part of the URI. :type path: str :param method: The HTTP method. :type method: str :param params: The HTTP parameters. :type params: dict :param prefix: The prefix to use before the path. :param isJson: Whether the response is a JSON object. :param basicAuth: A string to pass with the Authorization: Basic header of the form 'login:password' :param exception: Set this to True if a 500 is expected from this call. :param cookie: A custom cookie value to set. :param token: If you want to use an existing token to login, pass the token ID. :type token: str :param additionalHeaders: A list of headers to add to the request. Each item is a tuple of the form (header-name, header-value). :param useHttps: If True, pretend to use HTTPS. :param authHeader: The HTTP request header to use for authentication. :type authHeader: str :returns: The cherrypy response object from the request. 
""" if not params: params = {} headers = [('Host', '127.0.0.1'), ('Accept', 'application/json')] qs = fd = None if additionalHeaders: headers.extend(additionalHeaders) if method in ['POST', 'PUT', 'PATCH'] or body: if isinstance(body, six.string_types): body = body.encode('utf8') qs = urllib.parse.urlencode(params).encode('utf8') if type is None: headers.append(('Content-Type', 'application/x-www-form-urlencoded')) else: headers.append(('Content-Type', type)) qs = body headers.append(('Content-Length', '%d' % len(qs))) fd = BytesIO(qs) qs = None elif params: qs = urllib.parse.urlencode(params) app = cherrypy.tree.apps[''] request, response = app.get_serving( local, remote, 'http' if not useHttps else 'https', 'HTTP/1.1') request.show_tracebacks = True self._buildHeaders(headers, cookie, user, token, basicAuth, authHeader) # Python2 will not match Unicode URLs url = str(prefix + path) try: response = request.run(method, url, qs, 'HTTP/1.1', headers, fd) finally: if fd: fd.close() if isJson: body = self.getBody(response) try: response.json = json.loads(body) except Exception: print(body) raise AssertionError('Did not receive JSON response') if not exception and response.output_status.startswith(b'500'): raise AssertionError("Internal server error: %s" % self.getBody(response)) return response def getBody(self, response, text=True): """ Returns the response body as a text type or binary string. :param response: The response object from the server. :param text: If true, treat the data as a text string, otherwise, treat as binary. """ data = '' if text else b'' for chunk in response.body: if text and isinstance(chunk, six.binary_type): chunk = chunk.decode('utf8') elif not text and not isinstance(chunk, six.binary_type): chunk = chunk.encode('utf8') data += chunk return data def multipartRequest(self, fields, files, path, method='POST', user=None, prefix='/api/v1', isJson=True): """ Make an HTTP request with multipart/form-data encoding. 
This can be used to send files with the request. :param fields: List of (name, value) tuples. :param files: List of (name, filename, content) tuples. :param path: The path part of the URI. :type path: str :param method: The HTTP method. :type method: str :param prefix: The prefix to use before the path. :param isJson: Whether the response is a JSON object. :returns: The cherrypy response object from the request. """ contentType, body, size = MultipartFormdataEncoder().encode( fields, files) headers = [('Host', '127.0.0.1'), ('Accept', 'application/json'), ('Content-Type', contentType), ('Content-Length', str(size))] app = cherrypy.tree.apps[''] request, response = app.get_serving(local, remote, 'http', 'HTTP/1.1') request.show_tracebacks = True if user is not None: headers.append(('Girder-Token', self._genToken(user))) fd = io.BytesIO(body) # Python2 will not match Unicode URLs url = str(prefix + path) try: response = request.run(method, url, None, 'HTTP/1.1', headers, fd) finally: fd.close() if isJson: body = self.getBody(response) try: response.json = json.loads(body) except Exception: print(body) raise AssertionError('Did not receive JSON response') if response.output_status.startswith(b'500'): raise AssertionError("Internal server error: %s" % self.getBody(response)) return response class MultipartFormdataEncoder(object): """ This class is adapted from http://stackoverflow.com/a/18888633/2550451 It is used as a helper for creating multipart/form-data requests to simulate file uploads. 
""" def __init__(self): self.boundary = uuid.uuid4().hex self.contentType = \ 'multipart/form-data; boundary=%s' % self.boundary @classmethod def u(cls, s): if sys.hexversion < 0x03000000 and isinstance(s, str): s = s.decode('utf-8') if sys.hexversion >= 0x03000000 and isinstance(s, bytes): s = s.decode('utf-8') return s def iter(self, fields, files): encoder = codecs.getencoder('utf-8') for (key, value) in fields: key = self.u(key) yield encoder('--%s\r\n' % self.boundary) yield encoder(self.u('Content-Disposition: form-data; ' 'name="%s"\r\n') % key) yield encoder('\r\n') if isinstance(value, int) or isinstance(value, float): value = str(value) yield encoder(self.u(value)) yield encoder('\r\n') for (key, filename, content) in files: key = self.u(key) filename = self.u(filename) yield encoder('--%s\r\n' % self.boundary) yield encoder(self.u('Content-Disposition: form-data; name="%s";' ' filename="%s"\r\n' % (key, filename))) yield encoder('Content-Type: application/octet-stream\r\n') yield encoder('\r\n') yield (content, len(content)) yield encoder('\r\n') yield encoder('--%s--\r\n' % self.boundary) def encode(self, fields, files): body = io.BytesIO() size = 0 for chunk, chunkLen in self.iter(fields, files): if not isinstance(chunk, six.binary_type): chunk = chunk.encode('utf8') body.write(chunk) size += chunkLen return self.contentType, body.getvalue(), size def _sigintHandler(*args): print('Received SIGINT, shutting down mock SMTP server...') mockSmtp.stop() sys.exit(1) signal.signal(signal.SIGINT, _sigintHandler)
apache-2.0
victorywang80/Maintenance
saltstack/src/salt/modules/win_autoruns.py
1932
# -*- coding: utf-8 -*-
'''
Module for listing programs that automatically run on startup
(very alpha...not tested on anything but my Win 7x64)
'''

# Import python libs
import os

# Import salt libs
import salt.utils

# Define a function alias in order not to shadow built-in's
__func_alias__ = {
    'list_': 'list'
}

# Define the module's virtual name
__virtualname__ = 'autoruns'


def __virtual__():
    '''
    Only works on Windows systems
    '''
    if salt.utils.is_windows():
        return __virtualname__
    return False


def list_():
    '''
    Get a list of automatically running programs.

    Returns a dict keyed by each location searched (registry key or
    per-user startup folder); the value is the list of entries found there.

    CLI Example:

    .. code-block:: bash

        salt '*' autoruns.list
    '''
    autoruns = {}

    # Find autoruns in registry. The "/reg:64" suffix queries the 64-bit
    # registry view when salt runs as a 32-bit process.
    keys = ['HKLM\\Software\\Microsoft\\Windows\\CurrentVersion\\Run',
            'HKLM\\Software\\Microsoft\\Windows\\CurrentVersion\\Run /reg:64',
            'HKCU\\Software\\Microsoft\\Windows\\CurrentVersion\\Run'
            ]
    winver = __grains__['osfullname']
    for key in keys:
        autoruns[key] = []
        cmd = 'reg query ' + key
        # NOTE: removed a leftover debug "print cmd" statement here; it
        # polluted module output (and used Python-2-only syntax).
        for line in __salt__['cmd.run'](cmd).splitlines():
            # Remove junk lines: the echoed key name ("HKEY...") and any
            # "ERROR..." output from reg.exe.
            if line and line[0:4] != "HKEY" and line[0:5] != "ERROR":
                autoruns[key].append(line)

    # Find autoruns in each user's startup folder. The profile layout
    # changed between Windows XP/2003 and Windows 7.
    if '7' in winver:
        user_dir = 'C:\\Users\\'
        startup_dir = '\\AppData\\Roaming\\Microsoft\\Windows\\Start Menu\\Programs\\Startup'
    else:
        user_dir = 'C:\\Documents and Settings\\'
        startup_dir = '\\Start Menu\\Programs\\Startup'

    for user in os.listdir(user_dir):
        try:
            full_dir = user_dir + user + startup_dir
            files = os.listdir(full_dir)
            autoruns[full_dir] = []
            for afile in files:
                autoruns[full_dir].append(afile)
        except Exception:
            # Best effort: some profiles (e.g. service accounts) have no
            # startup folder; skip them silently.
            pass

    return autoruns
apache-2.0
SuperMap/iClient-Android-Example
help/html/mergedProjects/forAndroidJavaDoc/com/supermap/android/spatialAnalyst/SurfaceAnalystMethod.html
15988
<?xml version="1.0" encoding="utf-8"?> <!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"> <!--NewPage--> <html xmlns="http://www.w3.org/1999/xhtml"> <head> <!-- Generated by javadoc (build 1.6.0_26) on Tue Jan 13 19:21:07 CST 2015 --> <meta http-equiv="Content-Type" content="text/html; charset=utf-8" /> <title>SurfaceAnalystMethod</title> <meta name="date" content="2015-01-13" /> <link rel="stylesheet" type="text/css" href="../../../../stylesheet.css" title="Style" /> <script type="text/javascript"> //<![CDATA[ function windowTitle() { if (location.href.indexOf('is-external=true') == -1) { parent.document.title="SurfaceAnalystMethod"; } } //]]> </script> <script type="text/javascript" language="JavaScript"> //<![CDATA[ function reDo() { if (innerWidth != origWidth || innerHeight != origHeight) location.reload(); } if ((parseInt(navigator.appVersion) == 4) && (navigator.appName == "Netscape")) { origWidth = innerWidth; origHeight = innerHeight; onresize = reDo; } onerror = null; //]]> </script> <style type="text/css"> <!-- div.WebHelpPopupMenu { position:absolute; left:0px; top:0px; z-index:4; visibility:hidden; } --> </style> <script type="text/javascript" language="javascript1.2" src="../../../../whmsg.js" charset="utf-8"></script> <script type="text/javascript" language="javascript" src="../../../../whver.js" charset="utf-8"></script> <script type="text/javascript" language="javascript1.2" src="../../../../whutils.js" charset="utf-8"></script> <script type="text/javascript" language="javascript1.2" src="../../../../whproxy.js" charset="utf-8"></script> <script type="text/javascript" language="javascript1.2" src="../../../../whlang.js" charset="utf-8"></script> <script type="text/javascript" language="javascript1.2" src="../../../../whtopic.js" charset="utf-8"></script> <meta name="generator" content="Adobe RoboHelp 10" /> </head> <body><script type="text/javascript" 
language="javascript1.2">//<![CDATA[ <!-- if (window.gbWhTopic) { var strUrl = document.location.href; var bc = 0; var n = strUrl.toLowerCase().indexOf("bc-"); if(n != -1) { document.location.href = strUrl.substring(0, n); bc = strUrl.substring(n+3); } if (window.addTocInfo) { addTocInfo("com\nsupermap\nandroid\nspatialAnalyst\nSurfaceAnalystMethod"); addButton("show",BTN_TEXT,"Show","","","","",0,0,"","",""); } if (window.writeBtnStyle) writeBtnStyle(); if (window.writeIntopicBar) writeIntopicBar(1); } else if (window.gbIE4) document.location.reload(); onLoadHandler = function() { if (window.setRelStartPage) { setTimeout("setRelStartPage('../../../../forAndroidJavaDoc.htm');", 1) setTimeout("UpdateBreadCrumbsMarker();", 1); } } if (window.addEventListener){ window.addEventListener('load', onLoadHandler, false); } else if (window.attachEvent){ window.attachEvent('onload', onLoadHandler); } function onSetStartPage() { autoSync(1); sendSyncInfo(); sendAveInfoOut(); } //--> //]]></script> <script type="text/javascript" src="../../../../ehlpdhtm.js"></script> <hr /> <!-- ========= START OF TOP NAVBAR ======= --> <a name="navbar_top" id="navbar_top"><!-- --></a> <a href="#skip-navbar_top" title="跳过导航链接"></a> <table border="0" width="100%" cellpadding="1" cellspacing="0" summary=""> <tr> <td colspan="2" bgcolor="#EEEEFF" class="NavBarCell1"><a name="navbar_top_firstrow" id="navbar_top_firstrow"><!-- --></a> <table border="0" cellpadding="0" cellspacing="3" summary=""> <tr align="center" valign="top"> <td bgcolor="#EEEEFF" class="NavBarCell1"><a href="../../../../overview-summary.html"><font class="NavBarFont1"><b>概述</b></font></a>&#160;</td> <td bgcolor="#EEEEFF" class="NavBarCell1"><a href="package-summary.html"><font class="NavBarFont1"><b>软件包</b></font></a>&#160;</td> <td bgcolor="#FFFFFF" class="NavBarCell1Rev">&#160;<font class="NavBarFont1Rev"><b>类</b></font>&#160;</td> <td bgcolor="#EEEEFF" class="NavBarCell1"><a href="package-tree.html"><font 
class="NavBarFont1"><b>树</b></font></a>&#160;</td> <td bgcolor="#EEEEFF" class="NavBarCell1"><a href="../../../../deprecated-list.html"><font class="NavBarFont1"><b>已过时</b></font></a>&#160;</td> <td bgcolor="#EEEEFF" class="NavBarCell1"><a href="../../../../index-all.html"><font class="NavBarFont1"><b>索引</b></font></a>&#160;</td> <td bgcolor="#EEEEFF" class="NavBarCell1"><a href="../../../../help-doc.html"><font class="NavBarFont1"><b>帮助</b></font></a>&#160;</td> </tr> </table> </td> <td align="right" valign="top" rowspan="3"></td> </tr> <tr> <td bgcolor="white" class="NavBarCell2"><font size="-2">&#160;<a href="SpatialRelationType.html" title="com.supermap.android.spatialAnalyst 中的枚举"><b>上一个类</b></a>&#160; &#160;<a href="SurfaceAnalystParameters.html" title="com.supermap.android.spatialAnalyst 中的类"><b>下一个类</b></a></font></td> <td bgcolor="white" class="NavBarCell2"><font size="-2"><a href="../../../../index.html?com/supermap/android/spatialAnalyst/SurfaceAnalystMethod.html" target="_top"></a> &#160; &#160;<a href="SurfaceAnalystMethod.html" target="_top"><b>无框架</b></a> &#160; &#160;<script type="text/javascript"> //<![CDATA[ <!-- if(window==top) { document.writeln('<A HREF="../../../../allclasses-noframe.html"><B>所有类<\/B><\/A>'); } //--> //]]> </script> <noscript><a href="../../../../allclasses-noframe.html"><b>所有类</b></a></noscript></font></td> </tr> <tr> <td valign="top" class="NavBarCell3"><font size="-2">摘要:&#160;嵌套&#160;|&#160;<a href="#enum_constant_summary">枚举常量</a>&#160;|&#160;字段&#160;|&#160;<a href="#method_summary">方法</a></font></td> <td valign="top" class="NavBarCell3"><font size="-2">详细信息:&#160;<a href="#enum_constant_detail">枚举常量</a>&#160;|&#160;字段&#160;|&#160;<a href="#method_detail">方法</a></font></td> </tr> </table> <a name="skip-navbar_top" id="skip-navbar_top"></a> <!-- ========= END OF TOP NAVBAR ========= --> <hr /> <!-- ======== START OF CLASS DATA ======== --> <h2><font size="-1">com.supermap.android.spatialAnalyst</font><br /> 枚举 
SurfaceAnalystMethod</h2> <pre> java.lang.Object <img src="../../../../resources/inherit.gif" alt="继承者 " />java.lang.Enum&lt;<a href="SurfaceAnalystMethod.html" title="com.supermap.android.spatialAnalyst 中的枚举">SurfaceAnalystMethod</a>&gt; <img src="../../../../resources/inherit.gif" alt="继承者 " /><b>com.supermap.android.spatialAnalyst.SurfaceAnalystMethod</b> </pre> <dl> <dt><b>所有已实现的接口:</b></dt> <dd>java.io.Serializable, java.lang.Comparable&lt;<a href="SurfaceAnalystMethod.html" title="com.supermap.android.spatialAnalyst 中的枚举">SurfaceAnalystMethod</a>&gt;</dd> </dl> <hr /> <dl> <dd> <pre> public enum <b>SurfaceAnalystMethod</b> </pre> <dt>extends java.lang.Enum&lt;<a href="SurfaceAnalystMethod.html" title="com.supermap.android.spatialAnalyst 中的枚举">SurfaceAnalystMethod</a>&gt;</dt> </dd> </dl> <p></p> <p>表面分析类型枚举类。</p> <p>包括等值线、等值面提取两种。</p> <p></p> <p></p> <hr /> <p><!-- =========== ENUM CONSTANT SUMMARY =========== --> <a name="enum_constant_summary" id="enum_constant_summary"><!-- --></a></p> <table border="1" width="100%" cellpadding="3" cellspacing="0" summary=""> <tr bgcolor="#CCCCFF" class="TableHeadingColor"> <th align="left" colspan="2"><font size="+2"><b>枚举常量摘要</b></font></th> </tr> <tr bgcolor="white" class="TableRowColor"> <td><code><b><a href="SurfaceAnalystMethod.html#ISOLINE">ISOLINE</a></b></code><br /> &#160;&#160;&#160;&#160;&#160;&#160;&#160;&#160;&#160;&#160; 等值线提取</td> </tr> <tr bgcolor="white" class="TableRowColor"> <td><code><b><a href="SurfaceAnalystMethod.html#ISOREGION">ISOREGION</a></b></code><br /> &#160;&#160;&#160;&#160;&#160;&#160;&#160;&#160;&#160;&#160; 等值面提取</td> </tr> </table> &#160; <!-- ========== METHOD SUMMARY =========== --> <a name="method_summary" id="method_summary"><!-- --></a> <table border="1" width="100%" cellpadding="3" cellspacing="0" summary=""> <tr bgcolor="#CCCCFF" class="TableHeadingColor"> <th align="left" colspan="2"><font size="+2"><b>方法摘要</b></font></th> </tr> <tr bgcolor="white" class="TableRowColor"> <td 
align="right" valign="top" width="1%"><font size="-1"><code>static&#160;<a href="SurfaceAnalystMethod.html" title="com.supermap.android.spatialAnalyst 中的枚举">SurfaceAnalystMethod</a></code></font></td> <td><code><b><a href="SurfaceAnalystMethod.html#valueOf(java.lang.String)">valueOf</a></b>(java.lang.String&#160;name)</code><br /> &#160;&#160;&#160;&#160;&#160;&#160;&#160;&#160;&#160;&#160;返回带有指定名称的该类型的枚举常量。</td> </tr> <tr bgcolor="white" class="TableRowColor"> <td align="right" valign="top" width="1%"><font size="-1"><code>static&#160;<a href="SurfaceAnalystMethod.html" title="com.supermap.android.spatialAnalyst 中的枚举">SurfaceAnalystMethod</a>[]</code></font></td> <td><code><b><a href="SurfaceAnalystMethod.html#values()">values</a></b>()</code><br /> &#160;&#160;&#160;&#160;&#160;&#160;&#160;&#160;&#160;&#160;按照声明该枚举类型的常量的顺序,返回 包含这些常量的数组。</td> </tr> </table> &#160;<a name="methods_inherited_from_class_java.lang.Enum" id="methods_inherited_from_class_java.lang.Enum"><!-- --></a> <table border="1" width="100%" cellpadding="3" cellspacing="0" summary=""> <tr bgcolor="#EEEEFF" class="TableSubHeadingColor"> <th align="left"><b>从类 java.lang.Enum 继承的方法</b></th> </tr> <tr bgcolor="white" class="TableRowColor"> <td><code>clone, compareTo, equals, finalize, getDeclaringClass, hashCode, name, ordinal, toString, valueOf</code></td> </tr> </table> &#160;<a name="methods_inherited_from_class_java.lang.Object" id="methods_inherited_from_class_java.lang.Object"><!-- --></a> <table border="1" width="100%" cellpadding="3" cellspacing="0" summary=""> <tr bgcolor="#EEEEFF" class="TableSubHeadingColor"> <th align="left"><b>从类 java.lang.Object 继承的方法</b></th> </tr> <tr bgcolor="white" class="TableRowColor"> <td><code>getClass, notify, notifyAll, wait, wait, wait</code></td> </tr> </table> &#160; <p><!-- ============ ENUM CONSTANT DETAIL =========== --> <a name="enum_constant_detail" id="enum_constant_detail"><!-- --></a></p> <table border="1" width="100%" cellpadding="3" cellspacing="0" 
summary=""> <tr bgcolor="#CCCCFF" class="TableHeadingColor"> <th align="left" colspan="1"><font size="+2"><b>枚举常量详细信息</b></font></th> </tr> </table> <a name="ISOLINE" id="ISOLINE"><!-- --></a> <h3>ISOLINE</h3> <pre> public static final <a href="SurfaceAnalystMethod.html" title="com.supermap.android.spatialAnalyst 中的枚举">SurfaceAnalystMethod</a> <b>ISOLINE</b> </pre> <dl> <dd> <p>等值线提取</p> <p></p> </dd> </dl> <hr /> <a name="ISOREGION" id="ISOREGION"><!-- --></a> <h3>ISOREGION</h3> <pre> public static final <a href="SurfaceAnalystMethod.html" title="com.supermap.android.spatialAnalyst 中的枚举">SurfaceAnalystMethod</a> <b>ISOREGION</b> </pre> <dl> <dd> <p>等值面提取</p> <p></p> </dd> </dl> <!-- ============ METHOD DETAIL ========== --> <a name="method_detail" id="method_detail"><!-- --></a> <table border="1" width="100%" cellpadding="3" cellspacing="0" summary=""> <tr bgcolor="#CCCCFF" class="TableHeadingColor"> <th align="left" colspan="1"><font size="+2"><b>方法详细信息</b></font></th> </tr> </table> <a name="values()"><!-- --></a> <h3>values</h3> <pre> public static <a href="SurfaceAnalystMethod.html" title="com.supermap.android.spatialAnalyst 中的枚举">SurfaceAnalystMethod</a>[] <b>values</b>() </pre> <dl> <dd>按照声明该枚举类型的常量的顺序,返回 包含这些常量的数组。该方法可用于迭代 常量,如下所示: <pre> for (SurfaceAnalystMethod c : SurfaceAnalystMethod.values()) System.out.println(c); </pre> <p></p> </dd> </dl> <hr /> <a name="valueOf(java.lang.String)"><!-- --></a> <h3>valueOf</h3> <pre> public static <a href="SurfaceAnalystMethod.html" title="com.supermap.android.spatialAnalyst 中的枚举">SurfaceAnalystMethod</a> <b>valueOf</b>(java.lang.String name) </pre> <dl> <dd>返回带有指定名称的该类型的枚举常量。 字符串必须与用于声明该类型的枚举常量的 标识符<i>完全</i>匹配。(不允许有多余 的空格。) <p></p> </dd> <dd> <dl> <dt><b>参数:</b></dt> <dd><code>name</code> - 要返回的枚举常量的名称。</dd> <dt><b>返回:</b></dt> <dd>返回带有指定名称的枚举常量</dd> <dt><b>抛出:</b></dt> <dd><code>如果该枚举类型没有带有指定名称的常量,</code> - 则抛出 IllegalArgumentException</dd> </dl> </dd> </dl> <!-- ========= END OF CLASS DATA ========= --> <hr /> 
<!-- ======= START OF BOTTOM NAVBAR ====== --> <a name="navbar_bottom" id="navbar_bottom"><!-- --></a> <a href="#skip-navbar_bottom" title="跳过导航链接"></a> <table border="0" width="100%" cellpadding="1" cellspacing="0" summary=""> <tr> <td colspan="2" bgcolor="#EEEEFF" class="NavBarCell1"><a name="navbar_bottom_firstrow" id="navbar_bottom_firstrow"><!-- --></a> <table border="0" cellpadding="0" cellspacing="3" summary=""> <tr align="center" valign="top"> <td bgcolor="#EEEEFF" class="NavBarCell1"><a href="../../../../overview-summary.html"><font class="NavBarFont1"><b>概述</b></font></a>&#160;</td> <td bgcolor="#EEEEFF" class="NavBarCell1"><a href="package-summary.html"><font class="NavBarFont1"><b>软件包</b></font></a>&#160;</td> <td bgcolor="#FFFFFF" class="NavBarCell1Rev">&#160;<font class="NavBarFont1Rev"><b>类</b></font>&#160;</td> <td bgcolor="#EEEEFF" class="NavBarCell1"><a href="package-tree.html"><font class="NavBarFont1"><b>树</b></font></a>&#160;</td> <td bgcolor="#EEEEFF" class="NavBarCell1"><a href="../../../../deprecated-list.html"><font class="NavBarFont1"><b>已过时</b></font></a>&#160;</td> <td bgcolor="#EEEEFF" class="NavBarCell1"><a href="../../../../index-all.html"><font class="NavBarFont1"><b>索引</b></font></a>&#160;</td> <td bgcolor="#EEEEFF" class="NavBarCell1"><a href="../../../../help-doc.html"><font class="NavBarFont1"><b>帮助</b></font></a>&#160;</td> </tr> </table> </td> <td align="right" valign="top" rowspan="3"></td> </tr> <tr> <td bgcolor="white" class="NavBarCell2"><font size="-2">&#160;<a href="SpatialRelationType.html" title="com.supermap.android.spatialAnalyst 中的枚举"><b>上一个类</b></a>&#160; &#160;<a href="SurfaceAnalystParameters.html" title="com.supermap.android.spatialAnalyst 中的类"><b>下一个类</b></a></font></td> <td bgcolor="white" class="NavBarCell2"><font size="-2"><a href="../../../../index.html?com/supermap/android/spatialAnalyst/SurfaceAnalystMethod.html" target="_top"></a> &#160; &#160;<a href="SurfaceAnalystMethod.html" 
target="_top"><b>无框架</b></a> &#160; &#160;<script type="text/javascript"> //<![CDATA[ <!-- if(window==top) { document.writeln('<A HREF="../../../../allclasses-noframe.html"><B>所有类<\/B><\/A>'); } //--> //]]> </script> <noscript><a href="../../../../allclasses-noframe.html"><b>所有类</b></a></noscript></font></td> </tr> <tr> <td valign="top" class="NavBarCell3"><font size="-2">摘要:&#160;嵌套&#160;|&#160;<a href="#enum_constant_summary">枚举常量</a>&#160;|&#160;字段&#160;|&#160;<a href="#method_summary">方法</a></font></td> <td valign="top" class="NavBarCell3"><font size="-2">详细信息:&#160;<a href="#enum_constant_detail">枚举常量</a>&#160;|&#160;字段&#160;|&#160;<a href="#method_detail">方法</a></font></td> </tr> </table> <a name="skip-navbar_bottom" id="skip-navbar_bottom"></a> <!-- ======== END OF BOTTOM NAVBAR ======= --> <hr /> <script type="text/javascript" language="javascript1.2">//<![CDATA[ <!-- if (window.writeIntopicBar) writeIntopicBar(0); highlightSearch(); //--> //]]></script> </body> </html>
apache-2.0
snailycy/AndroidHybridLib
sample/src/main/java/com/github/snailycy/androidhybridlib/JSGetCachePlugin.java
748
package com.github.snailycy.androidhybridlib; import android.widget.Toast; import com.github.snailycy.hybridlib.bridge.BaseJSPluginSync; import org.json.JSONObject; /** * Created by ycy on 2017/9/27. */ public class JSGetCachePlugin extends BaseJSPluginSync { @Override public String jsCallNative(String requestParams) { Toast.makeText(getContext(), "jsCallNative , requestParams = " + requestParams, Toast.LENGTH_LONG).show(); try { JSONObject jsonObject1 = new JSONObject(requestParams); JSONObject jsonObject = new JSONObject(); jsonObject.put("aaa", "hahahahah"); return jsonObject.toString(); } catch (Exception e) { } return null; } }
apache-2.0
artem-zinnatullin/RxJava
src/main/java/io/reactivex/Maybe.java
233229
/** * Copyright (c) 2016-present, RxJava Contributors. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in * compliance with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License is * distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See * the License for the specific language governing permissions and limitations under the License. */ package io.reactivex; import java.util.NoSuchElementException; import java.util.concurrent.*; import org.reactivestreams.*; import io.reactivex.annotations.*; import io.reactivex.disposables.Disposable; import io.reactivex.exceptions.Exceptions; import io.reactivex.functions.*; import io.reactivex.internal.functions.*; import io.reactivex.internal.fuseable.*; import io.reactivex.internal.observers.BlockingMultiObserver; import io.reactivex.internal.operators.flowable.*; import io.reactivex.internal.operators.maybe.*; import io.reactivex.internal.operators.mixed.*; import io.reactivex.internal.util.*; import io.reactivex.observers.TestObserver; import io.reactivex.plugins.RxJavaPlugins; import io.reactivex.schedulers.Schedulers; /** * The {@code Maybe} class represents a deferred computation and emission of a single value, no value at all or an exception. * <p> * The {@code Maybe} class implements the {@link MaybeSource} base interface and the default consumer * type it interacts with is the {@link MaybeObserver} via the {@link #subscribe(MaybeObserver)} method. * <p> * The {@code Maybe} operates with the following sequential protocol: * <pre><code> * onSubscribe (onSuccess | onError | onComplete)? 
* </code></pre> * <p> * Note that {@code onSuccess}, {@code onError} and {@code onComplete} are mutually exclusive events; unlike {@code Observable}, * {@code onSuccess} is never followed by {@code onError} or {@code onComplete}. * <p> * Like {@link Observable}, a running {@code Maybe} can be stopped through the {@link Disposable} instance * provided to consumers through {@link MaybeObserver#onSubscribe}. * <p> * Like an {@code Observable}, a {@code Maybe} is lazy, can be either "hot" or "cold", synchronous or * asynchronous. {@code Maybe} instances returned by the methods of this class are <em>cold</em> * and there is a standard <em>hot</em> implementation in the form of a subject: * {@link io.reactivex.subjects.MaybeSubject MaybeSubject}. * <p> * The documentation for this class makes use of marble diagrams. The following legend explains these diagrams: * <p> * <img width="640" height="370" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/maybe.png" alt=""> * <p> * See {@link Flowable} or {@link Observable} for the * implementation of the Reactive Pattern for a stream or vector of values. 
* <p> * Example: * <pre><code> * Disposable d = Maybe.just("Hello World") * .delay(10, TimeUnit.SECONDS, Schedulers.io()) * .subscribeWith(new DisposableMaybeObserver&lt;String&gt;() { * &#64;Override * public void onStart() { * System.out.println("Started"); * } * * &#64;Override * public void onSuccess(String value) { * System.out.println("Success: " + value); * } * * &#64;Override * public void onError(Throwable error) { * error.printStackTrace(); * } * * &#64;Override * public void onComplete() { * System.out.println("Done!"); * } * }); * * Thread.sleep(5000); * * d.dispose(); * </code></pre> * <p> * Note that by design, subscriptions via {@link #subscribe(MaybeObserver)} can't be disposed * from the outside (hence the * {@code void} return of the {@link #subscribe(MaybeObserver)} method) and it is the * responsibility of the implementor of the {@code MaybeObserver} to allow this to happen. * RxJava supports such usage with the standard * {@link io.reactivex.observers.DisposableMaybeObserver DisposableMaybeObserver} instance. * For convenience, the {@link #subscribeWith(MaybeObserver)} method is provided as well to * allow working with a {@code MaybeObserver} (or subclass) instance to be applied with in * a fluent manner (such as in the example above). * * @param <T> the value type * @since 2.0 * @see io.reactivex.observers.DisposableMaybeObserver */ public abstract class Maybe<T> implements MaybeSource<T> { /** * Runs multiple MaybeSources and signals the events of the first one that signals (disposing * the rest). * <p> * <img width="640" height="519" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Maybe.amb.png" alt=""> * <dl> * <dt><b>Scheduler:</b></dt> * <dd>{@code amb} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * @param <T> the value type * @param sources the Iterable sequence of sources. A subscription to each source will * occur in the same order as in the Iterable. 
* @return the new Maybe instance */ @CheckReturnValue @NonNull @SchedulerSupport(SchedulerSupport.NONE) public static <T> Maybe<T> amb(final Iterable<? extends MaybeSource<? extends T>> sources) { ObjectHelper.requireNonNull(sources, "sources is null"); return RxJavaPlugins.onAssembly(new MaybeAmb<T>(null, sources)); } /** * Runs multiple MaybeSources and signals the events of the first one that signals (disposing * the rest). * <p> * <img width="640" height="519" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Maybe.ambArray.png" alt=""> * <dl> * <dt><b>Scheduler:</b></dt> * <dd>{@code ambArray} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * @param <T> the value type * @param sources the array of sources. A subscription to each source will * occur in the same order as in the array. * @return the new Maybe instance */ @CheckReturnValue @SchedulerSupport(SchedulerSupport.NONE) @SuppressWarnings("unchecked") public static <T> Maybe<T> ambArray(final MaybeSource<? extends T>... sources) { if (sources.length == 0) { return empty(); } if (sources.length == 1) { return wrap((MaybeSource<T>)sources[0]); } return RxJavaPlugins.onAssembly(new MaybeAmb<T>(sources, null)); } /** * Concatenate the single values, in a non-overlapping fashion, of the MaybeSource sources provided by * an Iterable sequence. 
* <p> * <img width="640" height="526" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Maybe.concat.i.png" alt=""> * <dl> * <dt><b>Backpressure:</b></dt> * <dd>The returned {@code Flowable} honors the backpressure of the downstream consumer.</dd> * <dt><b>Scheduler:</b></dt> * <dd>{@code concat} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * @param <T> the value type * @param sources the Iterable sequence of MaybeSource instances * @return the new Flowable instance */ @BackpressureSupport(BackpressureKind.FULL) @CheckReturnValue @NonNull @SchedulerSupport(SchedulerSupport.NONE) public static <T> Flowable<T> concat(Iterable<? extends MaybeSource<? extends T>> sources) { ObjectHelper.requireNonNull(sources, "sources is null"); return RxJavaPlugins.onAssembly(new MaybeConcatIterable<T>(sources)); } /** * Returns a Flowable that emits the items emitted by two MaybeSources, one after the other. * <p> * <img width="640" height="422" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Maybe.concat.png" alt=""> * <dl> * <dt><b>Backpressure:</b></dt> * <dd>The returned {@code Flowable} honors the backpressure of the downstream consumer.</dd> * <dt><b>Scheduler:</b></dt> * <dd>{@code concat} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * * @param <T> the common value type * @param source1 * a MaybeSource to be concatenated * @param source2 * a MaybeSource to be concatenated * @return a Flowable that emits items emitted by the two source MaybeSources, one after the other. * @see <a href="http://reactivex.io/documentation/operators/concat.html">ReactiveX operators documentation: Concat</a> */ @BackpressureSupport(BackpressureKind.FULL) @CheckReturnValue @NonNull @SchedulerSupport(SchedulerSupport.NONE) @SuppressWarnings("unchecked") public static <T> Flowable<T> concat(MaybeSource<? extends T> source1, MaybeSource<? 
extends T> source2) { ObjectHelper.requireNonNull(source1, "source1 is null"); ObjectHelper.requireNonNull(source2, "source2 is null"); return concatArray(source1, source2); } /** * Returns a Flowable that emits the items emitted by three MaybeSources, one after the other. * <p> * <img width="640" height="422" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Maybe.concat.png" alt=""> * <dl> * <dt><b>Backpressure:</b></dt> * <dd>The returned {@code Flowable} honors the backpressure of the downstream consumer.</dd> * <dt><b>Scheduler:</b></dt> * <dd>{@code concat} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * * @param <T> the common value type * @param source1 * a MaybeSource to be concatenated * @param source2 * a MaybeSource to be concatenated * @param source3 * a MaybeSource to be concatenated * @return a Flowable that emits items emitted by the three source MaybeSources, one after the other. * @see <a href="http://reactivex.io/documentation/operators/concat.html">ReactiveX operators documentation: Concat</a> */ @BackpressureSupport(BackpressureKind.FULL) @CheckReturnValue @NonNull @SchedulerSupport(SchedulerSupport.NONE) @SuppressWarnings("unchecked") public static <T> Flowable<T> concat( MaybeSource<? extends T> source1, MaybeSource<? extends T> source2, MaybeSource<? extends T> source3) { ObjectHelper.requireNonNull(source1, "source1 is null"); ObjectHelper.requireNonNull(source2, "source2 is null"); ObjectHelper.requireNonNull(source3, "source3 is null"); return concatArray(source1, source2, source3); } /** * Returns a Flowable that emits the items emitted by four MaybeSources, one after the other. 
* <p> * <img width="640" height="422" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Maybe.concat.png" alt=""> * <dl> * <dt><b>Backpressure:</b></dt> * <dd>The returned {@code Flowable} honors the backpressure of the downstream consumer.</dd> * <dt><b>Scheduler:</b></dt> * <dd>{@code concat} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * * @param <T> the common value type * @param source1 * a MaybeSource to be concatenated * @param source2 * a MaybeSource to be concatenated * @param source3 * a MaybeSource to be concatenated * @param source4 * a MaybeSource to be concatenated * @return a Flowable that emits items emitted by the four source MaybeSources, one after the other. * @see <a href="http://reactivex.io/documentation/operators/concat.html">ReactiveX operators documentation: Concat</a> */ @BackpressureSupport(BackpressureKind.FULL) @CheckReturnValue @NonNull @SchedulerSupport(SchedulerSupport.NONE) @SuppressWarnings("unchecked") public static <T> Flowable<T> concat( MaybeSource<? extends T> source1, MaybeSource<? extends T> source2, MaybeSource<? extends T> source3, MaybeSource<? extends T> source4) { ObjectHelper.requireNonNull(source1, "source1 is null"); ObjectHelper.requireNonNull(source2, "source2 is null"); ObjectHelper.requireNonNull(source3, "source3 is null"); ObjectHelper.requireNonNull(source4, "source4 is null"); return concatArray(source1, source2, source3, source4); } /** * Concatenate the single values, in a non-overlapping fashion, of the MaybeSource sources provided by * a Publisher sequence. * <p> * <img width="640" height="416" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Maybe.concat.p.png" alt=""> * <dl> * <dt><b>Backpressure:</b></dt> * <dd>The returned {@code Flowable} honors the backpressure of the downstream consumer and * expects the {@code Publisher} to honor backpressure as well. 
If the sources {@code Publisher} * violates this, a {@link io.reactivex.exceptions.MissingBackpressureException} is signalled.</dd> * <dt><b>Scheduler:</b></dt> * <dd>{@code concat} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * @param <T> the value type * @param sources the Publisher of MaybeSource instances * @return the new Flowable instance */ @BackpressureSupport(BackpressureKind.FULL) @CheckReturnValue @SchedulerSupport(SchedulerSupport.NONE) public static <T> Flowable<T> concat(Publisher<? extends MaybeSource<? extends T>> sources) { return concat(sources, 2); } /** * Concatenate the single values, in a non-overlapping fashion, of the MaybeSource sources provided by * a Publisher sequence. * <p> * <img width="640" height="416" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Maybe.concat.pn.png" alt=""> * <dl> * <dt><b>Backpressure:</b></dt> * <dd>The returned {@code Flowable} honors the backpressure of the downstream consumer and * expects the {@code Publisher} to honor backpressure as well. If the sources {@code Publisher} * violates this, a {@link io.reactivex.exceptions.MissingBackpressureException} is signalled.</dd> * <dt><b>Scheduler:</b></dt> * <dd>{@code concat} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * @param <T> the value type * @param sources the Publisher of MaybeSource instances * @param prefetch the number of MaybeSources to prefetch from the Publisher * @return the new Flowable instance */ @BackpressureSupport(BackpressureKind.FULL) @CheckReturnValue @NonNull @SchedulerSupport(SchedulerSupport.NONE) @SuppressWarnings({ "unchecked", "rawtypes" }) public static <T> Flowable<T> concat(Publisher<? extends MaybeSource<? 
extends T>> sources, int prefetch) { ObjectHelper.requireNonNull(sources, "sources is null"); ObjectHelper.verifyPositive(prefetch, "prefetch"); return RxJavaPlugins.onAssembly(new FlowableConcatMapPublisher(sources, MaybeToPublisher.instance(), prefetch, ErrorMode.IMMEDIATE)); } /** * Concatenate the single values, in a non-overlapping fashion, of the MaybeSource sources in the array. * <p> * <img width="640" height="526" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Maybe.concatArray.png" alt=""> * <dl> * <dt><b>Backpressure:</b></dt> * <dd>The returned {@code Flowable} honors the backpressure of the downstream consumer.</dd> * <dt><b>Scheduler:</b></dt> * <dd>{@code concatArray} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * @param <T> the value type * @param sources the array of MaybeSource instances * @return the new Flowable instance */ @BackpressureSupport(BackpressureKind.FULL) @CheckReturnValue @NonNull @SchedulerSupport(SchedulerSupport.NONE) @SuppressWarnings("unchecked") public static <T> Flowable<T> concatArray(MaybeSource<? extends T>... sources) { ObjectHelper.requireNonNull(sources, "sources is null"); if (sources.length == 0) { return Flowable.empty(); } if (sources.length == 1) { return RxJavaPlugins.onAssembly(new MaybeToFlowable<T>((MaybeSource<T>)sources[0])); } return RxJavaPlugins.onAssembly(new MaybeConcatArray<T>(sources)); } /** * Concatenates a variable number of MaybeSource sources and delays errors from any of them * till all terminate. 
* <p> * <img width="640" height="425" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Maybe.concatArrayDelayError.png" alt=""> * <dl> * <dt><b>Backpressure:</b></dt> * <dd>The operator honors backpressure from downstream.</dd> * <dt><b>Scheduler:</b></dt> * <dd>{@code concatArrayDelayError} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * @param sources the array of sources * @param <T> the common base value type * @return the new Flowable instance * @throws NullPointerException if sources is null */ @SuppressWarnings("unchecked") @BackpressureSupport(BackpressureKind.FULL) @CheckReturnValue @SchedulerSupport(SchedulerSupport.NONE) public static <T> Flowable<T> concatArrayDelayError(MaybeSource<? extends T>... sources) { if (sources.length == 0) { return Flowable.empty(); } else if (sources.length == 1) { return RxJavaPlugins.onAssembly(new MaybeToFlowable<T>((MaybeSource<T>)sources[0])); } return RxJavaPlugins.onAssembly(new MaybeConcatArrayDelayError<T>(sources)); } /** * Concatenates a sequence of MaybeSource eagerly into a single stream of values. * <p> * Eager concatenation means that once a subscriber subscribes, this operator subscribes to all of the * source MaybeSources. The operator buffers the value emitted by these MaybeSources and then drains them * in order, each one after the previous one completes. 
* <p> * <img width="640" height="489" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Maybe.concatArrayEager.png" alt=""> * <dl> * <dt><b>Backpressure:</b></dt> * <dd>The operator honors backpressure from downstream.</dd> * <dt><b>Scheduler:</b></dt> * <dd>This method does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * @param <T> the value type * @param sources a sequence of MaybeSources that need to be eagerly concatenated * @return the new Flowable instance with the specified concatenation behavior */ @SuppressWarnings({ "rawtypes", "unchecked" }) @BackpressureSupport(BackpressureKind.FULL) @CheckReturnValue @SchedulerSupport(SchedulerSupport.NONE) public static <T> Flowable<T> concatArrayEager(MaybeSource<? extends T>... sources) { return Flowable.fromArray(sources).concatMapEager((Function)MaybeToPublisher.instance()); } /** * Concatenates the Iterable sequence of MaybeSources into a single sequence by subscribing to each MaybeSource, * one after the other, one at a time and delays any errors till the all inner MaybeSources terminate. * <p> * <img width="640" height="469" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Maybe.concatDelayError.i.png" alt=""> * <dl> * <dt><b>Backpressure:</b></dt> * <dd>The operator honors backpressure from downstream.</dd> * <dt><b>Scheduler:</b></dt> * <dd>{@code concatDelayError} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * * @param <T> the common element base type * @param sources the Iterable sequence of MaybeSources * @return the new Flowable with the concatenating behavior */ @SuppressWarnings({ "unchecked", "rawtypes" }) @BackpressureSupport(BackpressureKind.FULL) @CheckReturnValue @NonNull @SchedulerSupport(SchedulerSupport.NONE) public static <T> Flowable<T> concatDelayError(Iterable<? extends MaybeSource<? 
extends T>> sources) { ObjectHelper.requireNonNull(sources, "sources is null"); return Flowable.fromIterable(sources).concatMapDelayError((Function)MaybeToPublisher.instance()); } /** * Concatenates the Publisher sequence of Publishers into a single sequence by subscribing to each inner Publisher, * one after the other, one at a time and delays any errors till the all inner and the outer Publishers terminate. * <p> * <img width="640" height="360" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Maybe.concatDelayError.p.png" alt=""> * <dl> * <dt><b>Backpressure:</b></dt> * <dd>{@code concatDelayError} fully supports backpressure.</dd> * <dt><b>Scheduler:</b></dt> * <dd>{@code concatDelayError} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * * @param <T> the common element base type * @param sources the Publisher sequence of Publishers * @return the new Publisher with the concatenating behavior */ @SuppressWarnings({ "unchecked", "rawtypes" }) @BackpressureSupport(BackpressureKind.FULL) @CheckReturnValue @SchedulerSupport(SchedulerSupport.NONE) public static <T> Flowable<T> concatDelayError(Publisher<? extends MaybeSource<? extends T>> sources) { return Flowable.fromPublisher(sources).concatMapDelayError((Function)MaybeToPublisher.instance()); } /** * Concatenates a sequence of MaybeSources eagerly into a single stream of values. * <p> * Eager concatenation means that once a subscriber subscribes, this operator subscribes to all of the * source MaybeSources. The operator buffers the values emitted by these MaybeSources and then drains them * in order, each one after the previous one completes. 
* <p> * <img width="640" height="526" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Maybe.concatEager.i.png" alt=""> * <dl> * <dt><b>Backpressure:</b></dt> * <dd>Backpressure is honored towards the downstream.</dd> * <dt><b>Scheduler:</b></dt> * <dd>This method does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * @param <T> the value type * @param sources a sequence of MaybeSource that need to be eagerly concatenated * @return the new Flowable instance with the specified concatenation behavior */ @SuppressWarnings({ "rawtypes", "unchecked" }) @BackpressureSupport(BackpressureKind.FULL) @CheckReturnValue @SchedulerSupport(SchedulerSupport.NONE) public static <T> Flowable<T> concatEager(Iterable<? extends MaybeSource<? extends T>> sources) { return Flowable.fromIterable(sources).concatMapEager((Function)MaybeToPublisher.instance()); } /** * Concatenates a Publisher sequence of MaybeSources eagerly into a single stream of values. * <p> * Eager concatenation means that once a subscriber subscribes, this operator subscribes to all of the * emitted source Publishers as they are observed. The operator buffers the values emitted by these * Publishers and then drains them in order, each one after the previous one completes. * <p> * <img width="640" height="511" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Maybe.concatEager.p.png" alt=""> * <dl> * <dt><b>Backpressure:</b></dt> * <dd>Backpressure is honored towards the downstream and the outer Publisher is * expected to support backpressure. 
Violating this assumption, the operator will * signal {@link io.reactivex.exceptions.MissingBackpressureException}.</dd> * <dt><b>Scheduler:</b></dt> * <dd>This method does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * @param <T> the value type * @param sources a sequence of Publishers that need to be eagerly concatenated * @return the new Publisher instance with the specified concatenation behavior */ @SuppressWarnings({ "rawtypes", "unchecked" }) @BackpressureSupport(BackpressureKind.FULL) @CheckReturnValue @SchedulerSupport(SchedulerSupport.NONE) public static <T> Flowable<T> concatEager(Publisher<? extends MaybeSource<? extends T>> sources) { return Flowable.fromPublisher(sources).concatMapEager((Function)MaybeToPublisher.instance()); } /** * Provides an API (via a cold Maybe) that bridges the reactive world with the callback-style world. * <p> * Example: * <pre><code> * Maybe.&lt;Event&gt;create(emitter -&gt; { * Callback listener = new Callback() { * &#64;Override * public void onEvent(Event e) { * if (e.isNothing()) { * emitter.onComplete(); * } else { * emitter.onSuccess(e); * } * } * * &#64;Override * public void onFailure(Exception e) { * emitter.onError(e); * } * }; * * AutoCloseable c = api.someMethod(listener); * * emitter.setCancellable(c::close); * * }); * </code></pre> * <dl> * <dt><b>Scheduler:</b></dt> * <dd>{@code create} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * @param <T> the value type * @param onSubscribe the emitter that is called when a MaybeObserver subscribes to the returned {@code Maybe} * @return the new Maybe instance * @see MaybeOnSubscribe * @see Cancellable */ @CheckReturnValue @NonNull @SchedulerSupport(SchedulerSupport.NONE) public static <T> Maybe<T> create(MaybeOnSubscribe<T> onSubscribe) { ObjectHelper.requireNonNull(onSubscribe, "onSubscribe is null"); return RxJavaPlugins.onAssembly(new MaybeCreate<T>(onSubscribe)); } /** * Calls a Callable for each individual 
MaybeObserver to return the actual MaybeSource source to * be subscribed to. * <dl> * <dt><b>Scheduler:</b></dt> * <dd>{@code defer} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * @param <T> the value type * @param maybeSupplier the Callable that is called for each individual MaybeObserver and * returns a MaybeSource instance to subscribe to * @return the new Maybe instance */ @CheckReturnValue @NonNull @SchedulerSupport(SchedulerSupport.NONE) public static <T> Maybe<T> defer(final Callable<? extends MaybeSource<? extends T>> maybeSupplier) { ObjectHelper.requireNonNull(maybeSupplier, "maybeSupplier is null"); return RxJavaPlugins.onAssembly(new MaybeDefer<T>(maybeSupplier)); } /** * Returns a (singleton) Maybe instance that calls {@link MaybeObserver#onComplete onComplete} * immediately. * <p> * <img width="640" height="190" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/empty.png" alt=""> * <dl> * <dt><b>Scheduler:</b></dt> * <dd>{@code empty} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * @param <T> the value type * @return the new Maybe instance */ @CheckReturnValue @SchedulerSupport(SchedulerSupport.NONE) @SuppressWarnings("unchecked") public static <T> Maybe<T> empty() { return RxJavaPlugins.onAssembly((Maybe<T>)MaybeEmpty.INSTANCE); } /** * Returns a Maybe that invokes a subscriber's {@link MaybeObserver#onError onError} method when the * subscriber subscribes to it. 
* <p> * <img width="640" height="447" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Maybe.error.png" alt=""> * <dl> * <dt><b>Scheduler:</b></dt> * <dd>{@code error} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * * @param exception * the particular Throwable to pass to {@link MaybeObserver#onError onError} * @param <T> * the type of the item (ostensibly) emitted by the Maybe * @return a Maybe that invokes the subscriber's {@link MaybeObserver#onError onError} method when * the subscriber subscribes to it * @see <a href="http://reactivex.io/documentation/operators/empty-never-throw.html">ReactiveX operators documentation: Throw</a> */ @CheckReturnValue @NonNull @SchedulerSupport(SchedulerSupport.NONE) public static <T> Maybe<T> error(Throwable exception) { ObjectHelper.requireNonNull(exception, "exception is null"); return RxJavaPlugins.onAssembly(new MaybeError<T>(exception)); } /** * Returns a Maybe that invokes a {@link MaybeObserver}'s {@link MaybeObserver#onError onError} method when the * MaybeObserver subscribes to it. * <p> * <img width="640" height="190" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/error.png" alt=""> * <dl> * <dt><b>Scheduler:</b></dt> * <dd>{@code error} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * * @param supplier * a Callable factory to return a Throwable for each individual MaybeObserver * @param <T> * the type of the items (ostensibly) emitted by the Maybe * @return a Maybe that invokes the {@link MaybeObserver}'s {@link MaybeObserver#onError onError} method when * the MaybeObserver subscribes to it * @see <a href="http://reactivex.io/documentation/operators/empty-never-throw.html">ReactiveX operators documentation: Throw</a> */ @CheckReturnValue @NonNull @SchedulerSupport(SchedulerSupport.NONE) public static <T> Maybe<T> error(Callable<? 
extends Throwable> supplier) { ObjectHelper.requireNonNull(supplier, "errorSupplier is null"); return RxJavaPlugins.onAssembly(new MaybeErrorCallable<T>(supplier)); } /** * Returns a Maybe instance that runs the given Action for each subscriber and * emits either its exception or simply completes. * <dl> * <dt><b>Scheduler:</b></dt> * <dd>{@code fromAction} does not operate by default on a particular {@link Scheduler}.</dd> * <dt><b>Error handling:</b></dt> * <dd> If the {@link Action} throws an exception, the respective {@link Throwable} is * delivered to the downstream via {@link MaybeObserver#onError(Throwable)}, * except when the downstream has disposed this {@code Maybe} source. * In this latter case, the {@code Throwable} is delivered to the global error handler via * {@link RxJavaPlugins#onError(Throwable)} as an {@link io.reactivex.exceptions.UndeliverableException UndeliverableException}. * </dd> * </dl> * @param <T> the target type * @param run the runnable to run for each subscriber * @return the new Maybe instance * @throws NullPointerException if run is null */ @CheckReturnValue @NonNull @SchedulerSupport(SchedulerSupport.NONE) public static <T> Maybe<T> fromAction(final Action run) { ObjectHelper.requireNonNull(run, "run is null"); return RxJavaPlugins.onAssembly(new MaybeFromAction<T>(run)); } /** * Wraps a CompletableSource into a Maybe. 
 *
 * <dl>
 *  <dt><b>Scheduler:</b></dt>
 *  <dd>{@code fromCompletable} does not operate by default on a particular {@link Scheduler}.</dd>
 * </dl>
 * @param <T> the target type
 * @param completableSource the CompletableSource to convert from
 * @return the new Maybe instance
 * @throws NullPointerException if completableSource is null
 */
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public static <T> Maybe<T> fromCompletable(CompletableSource completableSource) {
    ObjectHelper.requireNonNull(completableSource, "completableSource is null");
    return RxJavaPlugins.onAssembly(new MaybeFromCompletable<T>(completableSource));
}

/**
 * Wraps a SingleSource into a Maybe.
 *
 * <dl>
 *  <dt><b>Scheduler:</b></dt>
 *  <dd>{@code fromSingle} does not operate by default on a particular {@link Scheduler}.</dd>
 * </dl>
 * @param <T> the target type
 * @param singleSource the SingleSource to convert from
 * @return the new Maybe instance
 * @throws NullPointerException if singleSource is null
 */
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public static <T> Maybe<T> fromSingle(SingleSource<T> singleSource) {
    ObjectHelper.requireNonNull(singleSource, "singleSource is null");
    return RxJavaPlugins.onAssembly(new MaybeFromSingle<T>(singleSource));
}

/**
 * Returns a {@link Maybe} that invokes the given {@link Callable} for each individual {@link MaybeObserver} that
 * subscribes and emits the resulting non-null item via {@code onSuccess} while
 * considering a {@code null} result from the {@code Callable} as indication for valueless completion
 * via {@code onComplete}.
 * <p>
 * This operator allows you to defer the execution of the given {@code Callable} until a {@code MaybeObserver}
 * subscribes to the returned {@link Maybe}. In other terms, this source operator evaluates the given
 * {@code Callable} "lazily".
 * <p>
 * Note that the {@code null} handling of this operator differs from the similar source operators in the other
 * {@link io.reactivex base reactive classes}.
Those operators signal a {@code NullPointerException} if the value returned by their * {@code Callable} is {@code null} while this {@code fromCallable} considers it to indicate the * returned {@code Maybe} is empty. * <dl> * <dt><b>Scheduler:</b></dt> * <dd>{@code fromCallable} does not operate by default on a particular {@link Scheduler}.</dd> * <dt><b>Error handling:</b></dt> * <dd>Any non-fatal exception thrown by {@link Callable#call()} will be forwarded to {@code onError}, * except if the {@code MaybeObserver} disposed the subscription in the meantime. In this latter case, * the exception is forwarded to the global error handler via * {@link io.reactivex.plugins.RxJavaPlugins#onError(Throwable)} wrapped into a * {@link io.reactivex.exceptions.UndeliverableException UndeliverableException}. * Fatal exceptions are rethrown and usually will end up in the executing thread's * {@link java.lang.Thread.UncaughtExceptionHandler#uncaughtException(Thread, Throwable)} handler.</dd> * </dl> * * @param callable * a {@link Callable} instance whose execution should be deferred and performed for each individual * {@code MaybeObserver} that subscribes to the returned {@link Maybe}. * @param <T> * the type of the item emitted by the {@link Maybe}. * @return a new Maybe instance */ @CheckReturnValue @NonNull @SchedulerSupport(SchedulerSupport.NONE) public static <T> Maybe<T> fromCallable(@NonNull final Callable<? extends T> callable) { ObjectHelper.requireNonNull(callable, "callable is null"); return RxJavaPlugins.onAssembly(new MaybeFromCallable<T>(callable)); } /** * Converts a {@link Future} into a Maybe, treating a null result as an indication of emptiness. 
* <p> * <img width="640" height="315" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/from.Future.png" alt=""> * <p> * You can convert any object that supports the {@link Future} interface into a Maybe that emits the * return value of the {@link Future#get} method of that object, by passing the object into the {@code from} * method. * <p> * <em>Important note:</em> This Maybe is blocking; you cannot dispose it. * <p> * Unlike 1.x, disposing the Maybe won't cancel the future. If necessary, one can use composition to achieve the * cancellation effect: {@code futureMaybe.doOnDispose(() -> future.cancel(true));}. * <dl> * <dt><b>Scheduler:</b></dt> * <dd>{@code fromFuture} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * * @param future * the source {@link Future} * @param <T> * the type of object that the {@link Future} returns, and also the type of item to be emitted by * the resulting Maybe * @return a Maybe that emits the item from the source {@link Future} * @see <a href="http://reactivex.io/documentation/operators/from.html">ReactiveX operators documentation: From</a> */ @CheckReturnValue @NonNull @SchedulerSupport(SchedulerSupport.NONE) public static <T> Maybe<T> fromFuture(Future<? extends T> future) { ObjectHelper.requireNonNull(future, "future is null"); return RxJavaPlugins.onAssembly(new MaybeFromFuture<T>(future, 0L, null)); } /** * Converts a {@link Future} into a Maybe, with a timeout on the Future. * <p> * <img width="640" height="315" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/from.Future.png" alt=""> * <p> * You can convert any object that supports the {@link Future} interface into a Maybe that emits the * return value of the {@link Future#get} method of that object, by passing the object into the {@code fromFuture} * method. * <p> * Unlike 1.x, disposing the Maybe won't cancel the future. 
If necessary, one can use composition to achieve the
 * cancellation effect: {@code futureMaybe.doOnDispose(() -> future.cancel(true));}.
 * <p>
 * <em>Important note:</em> This Maybe is blocking on the thread it gets subscribed on; you cannot dispose it.
 * <dl>
 *  <dt><b>Scheduler:</b></dt>
 *  <dd>{@code fromFuture} does not operate by default on a particular {@link Scheduler}.</dd>
 * </dl>
 *
 * @param future
 *            the source {@link Future}
 * @param timeout
 *            the maximum time to wait before calling {@code get}
 * @param unit
 *            the {@link TimeUnit} of the {@code timeout} argument
 * @param <T>
 *            the type of object that the {@link Future} returns, and also the type of item to be emitted by
 *            the resulting Maybe
 * @return a Maybe that emits the item from the source {@link Future}
 * @see <a href="http://reactivex.io/documentation/operators/from.html">ReactiveX operators documentation: From</a>
 */
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public static <T> Maybe<T> fromFuture(Future<? extends T> future, long timeout, TimeUnit unit) {
    ObjectHelper.requireNonNull(future, "future is null");
    ObjectHelper.requireNonNull(unit, "unit is null");
    return RxJavaPlugins.onAssembly(new MaybeFromFuture<T>(future, timeout, unit));
}

/**
 * Returns a Maybe instance that runs the given Runnable for each subscriber and
 * emits either its exception or simply completes.
 * <dl>
 *  <dt><b>Scheduler:</b></dt>
 *  <dd>{@code fromRunnable} does not operate by default on a particular {@link Scheduler}.</dd>
 * </dl>
 * @param <T> the target type
 * @param run the runnable to run for each subscriber
 * @return the new Maybe instance
 * @throws NullPointerException if run is null
 */
@CheckReturnValue
@NonNull
@SchedulerSupport(SchedulerSupport.NONE)
public static <T> Maybe<T> fromRunnable(final Runnable run) {
    ObjectHelper.requireNonNull(run, "run is null");
    return RxJavaPlugins.onAssembly(new MaybeFromRunnable<T>(run));
}

/**
 * Returns a {@code Maybe} that emits a specified item.
* <p> * <img width="640" height="485" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Maybe.just.png" alt=""> * <p> * To convert any object into a {@code Maybe} that emits that object, pass that object into the * {@code just} method. * <dl> * <dt><b>Scheduler:</b></dt> * <dd>{@code just} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * * @param item * the item to emit * @param <T> * the type of that item * @return a {@code Maybe} that emits {@code item} * @see <a href="http://reactivex.io/documentation/operators/just.html">ReactiveX operators documentation: Just</a> */ @CheckReturnValue @NonNull @SchedulerSupport(SchedulerSupport.NONE) public static <T> Maybe<T> just(T item) { ObjectHelper.requireNonNull(item, "item is null"); return RxJavaPlugins.onAssembly(new MaybeJust<T>(item)); } /** * Merges an Iterable sequence of MaybeSource instances into a single Flowable sequence, * running all MaybeSources at once. * <dl> * <dt><b>Backpressure:</b></dt> * <dd>The operator honors backpressure from downstream.</dd> * <dt><b>Scheduler:</b></dt> * <dd>{@code merge} does not operate by default on a particular {@link Scheduler}.</dd> * <dt><b>Error handling:</b></dt> * <dd>If any of the source {@code MaybeSource}s signal a {@code Throwable} via {@code onError}, the resulting * {@code Flowable} terminates with that {@code Throwable} and all other source {@code MaybeSource}s are disposed. * If more than one {@code MaybeSource} signals an error, the resulting {@code Flowable} may terminate with the * first one's error or, depending on the concurrency of the sources, may terminate with a * {@code CompositeException} containing two or more of the various error signals. * {@code Throwable}s that didn't make into the composite will be sent (individually) to the global error handler via * {@link RxJavaPlugins#onError(Throwable)} method as {@code UndeliverableException} errors. 
Similarly, {@code Throwable}s * signaled by source(s) after the returned {@code Flowable} has been cancelled or terminated with a * (composite) error will be sent to the same global error handler. * Use {@link #mergeDelayError(Iterable)} to merge sources and terminate only when all source {@code MaybeSource}s * have completed or failed with an error. * </dd> * </dl> * @param <T> the common and resulting value type * @param sources the Iterable sequence of MaybeSource sources * @return the new Flowable instance * @see #mergeDelayError(Iterable) */ @BackpressureSupport(BackpressureKind.FULL) @CheckReturnValue @SchedulerSupport(SchedulerSupport.NONE) public static <T> Flowable<T> merge(Iterable<? extends MaybeSource<? extends T>> sources) { return merge(Flowable.fromIterable(sources)); } /** * Merges a Flowable sequence of MaybeSource instances into a single Flowable sequence, * running all MaybeSources at once. * <dl> * <dt><b>Backpressure:</b></dt> * <dd>The operator honors backpressure from downstream.</dd> * <dt><b>Scheduler:</b></dt> * <dd>{@code merge} does not operate by default on a particular {@link Scheduler}.</dd> * <dt><b>Error handling:</b></dt> * <dd>If any of the source {@code MaybeSource}s signal a {@code Throwable} via {@code onError}, the resulting * {@code Flowable} terminates with that {@code Throwable} and all other source {@code MaybeSource}s are disposed. * If more than one {@code MaybeSource} signals an error, the resulting {@code Flowable} may terminate with the * first one's error or, depending on the concurrency of the sources, may terminate with a * {@code CompositeException} containing two or more of the various error signals. * {@code Throwable}s that didn't make into the composite will be sent (individually) to the global error handler via * {@link RxJavaPlugins#onError(Throwable)} method as {@code UndeliverableException} errors. 
Similarly, {@code Throwable}s * signaled by source(s) after the returned {@code Flowable} has been cancelled or terminated with a * (composite) error will be sent to the same global error handler. * Use {@link #mergeDelayError(Publisher)} to merge sources and terminate only when all source {@code MaybeSource}s * have completed or failed with an error. * </dd> * </dl> * @param <T> the common and resulting value type * @param sources the Flowable sequence of MaybeSource sources * @return the new Flowable instance * @see #mergeDelayError(Publisher) */ @BackpressureSupport(BackpressureKind.FULL) @CheckReturnValue @SchedulerSupport(SchedulerSupport.NONE) public static <T> Flowable<T> merge(Publisher<? extends MaybeSource<? extends T>> sources) { return merge(sources, Integer.MAX_VALUE); } /** * Merges a Flowable sequence of MaybeSource instances into a single Flowable sequence, * running at most maxConcurrency MaybeSources at once. * <dl> * <dt><b>Backpressure:</b></dt> * <dd>The operator honors backpressure from downstream.</dd> * <dt><b>Scheduler:</b></dt> * <dd>{@code merge} does not operate by default on a particular {@link Scheduler}.</dd> * <dt><b>Error handling:</b></dt> * <dd>If any of the source {@code MaybeSource}s signal a {@code Throwable} via {@code onError}, the resulting * {@code Flowable} terminates with that {@code Throwable} and all other source {@code MaybeSource}s are disposed. * If more than one {@code MaybeSource} signals an error, the resulting {@code Flowable} may terminate with the * first one's error or, depending on the concurrency of the sources, may terminate with a * {@code CompositeException} containing two or more of the various error signals. * {@code Throwable}s that didn't make into the composite will be sent (individually) to the global error handler via * {@link RxJavaPlugins#onError(Throwable)} method as {@code UndeliverableException} errors. 
Similarly, {@code Throwable}s * signaled by source(s) after the returned {@code Flowable} has been cancelled or terminated with a * (composite) error will be sent to the same global error handler. * Use {@link #mergeDelayError(Publisher, int)} to merge sources and terminate only when all source {@code MaybeSource}s * have completed or failed with an error. * </dd> * </dl> * @param <T> the common and resulting value type * @param sources the Flowable sequence of MaybeSource sources * @param maxConcurrency the maximum number of concurrently running MaybeSources * @return the new Flowable instance * @see #mergeDelayError(Publisher, int) */ @BackpressureSupport(BackpressureKind.FULL) @CheckReturnValue @NonNull @SchedulerSupport(SchedulerSupport.NONE) @SuppressWarnings({ "unchecked", "rawtypes" }) public static <T> Flowable<T> merge(Publisher<? extends MaybeSource<? extends T>> sources, int maxConcurrency) { ObjectHelper.requireNonNull(sources, "source is null"); ObjectHelper.verifyPositive(maxConcurrency, "maxConcurrency"); return RxJavaPlugins.onAssembly(new FlowableFlatMapPublisher(sources, MaybeToPublisher.instance(), false, maxConcurrency, 1)); } /** * Flattens a {@code MaybeSource} that emits a {@code MaybeSource} into a single {@code MaybeSource} that emits the item * emitted by the nested {@code MaybeSource}, without any transformation. * <p> * <img width="640" height="393" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Maybe.merge.oo.png" alt=""> * <dl> * <dt><b>Scheduler:</b></dt> * <dd>{@code merge} does not operate by default on a particular {@link Scheduler}.</dd> * <dt><b>Error handling:</b></dt> * <dd>The resulting {@code Maybe} emits the outer source's or the inner {@code MaybeSource}'s {@code Throwable} as is. 
* Unlike the other {@code merge()} operators, this operator won't and can't produce a {@code CompositeException} because there is * only one possibility for the outer or the inner {@code MaybeSource} to emit an {@code onError} signal. * Therefore, there is no need for a {@code mergeDelayError(MaybeSource<MaybeSource<T>>)} operator. * </dd> * </dl> * * @param <T> the value type of the sources and the output * @param source * a {@code MaybeSource} that emits a {@code MaybeSource} * @return a {@code Maybe} that emits the item that is the result of flattening the {@code MaybeSource} emitted * by {@code source} * @see <a href="http://reactivex.io/documentation/operators/merge.html">ReactiveX operators documentation: Merge</a> */ @CheckReturnValue @NonNull @SchedulerSupport(SchedulerSupport.NONE) @SuppressWarnings({ "unchecked", "rawtypes" }) public static <T> Maybe<T> merge(MaybeSource<? extends MaybeSource<? extends T>> source) { ObjectHelper.requireNonNull(source, "source is null"); return RxJavaPlugins.onAssembly(new MaybeFlatten(source, Functions.identity())); } /** * Flattens two MaybeSources into a single Flowable, without any transformation. * <p> * <img width="640" height="483" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Maybe.merge.png" alt=""> * <p> * You can combine items emitted by multiple MaybeSources so that they appear as a single Flowable, by * using the {@code merge} method. * <dl> * <dt><b>Backpressure:</b></dt> * <dd>The operator honors backpressure from downstream.</dd> * <dt><b>Scheduler:</b></dt> * <dd>{@code merge} does not operate by default on a particular {@link Scheduler}.</dd> * <dt><b>Error handling:</b></dt> * <dd>If any of the source {@code MaybeSource}s signal a {@code Throwable} via {@code onError}, the resulting * {@code Flowable} terminates with that {@code Throwable} and all other source {@code MaybeSource}s are disposed. 
* If more than one {@code MaybeSource} signals an error, the resulting {@code Flowable} may terminate with the * first one's error or, depending on the concurrency of the sources, may terminate with a * {@code CompositeException} containing two or more of the various error signals. * {@code Throwable}s that didn't make into the composite will be sent (individually) to the global error handler via * {@link RxJavaPlugins#onError(Throwable)} method as {@code UndeliverableException} errors. Similarly, {@code Throwable}s * signaled by source(s) after the returned {@code Flowable} has been cancelled or terminated with a * (composite) error will be sent to the same global error handler. * Use {@link #mergeDelayError(MaybeSource, MaybeSource)} to merge sources and terminate only when all source {@code MaybeSource}s * have completed or failed with an error. * </dd> * </dl> * * @param <T> the common value type * @param source1 * a MaybeSource to be merged * @param source2 * a MaybeSource to be merged * @return a Flowable that emits all of the items emitted by the source MaybeSources * @see <a href="http://reactivex.io/documentation/operators/merge.html">ReactiveX operators documentation: Merge</a> * @see #mergeDelayError(MaybeSource, MaybeSource) */ @BackpressureSupport(BackpressureKind.FULL) @CheckReturnValue @NonNull @SchedulerSupport(SchedulerSupport.NONE) @SuppressWarnings("unchecked") public static <T> Flowable<T> merge( MaybeSource<? extends T> source1, MaybeSource<? extends T> source2 ) { ObjectHelper.requireNonNull(source1, "source1 is null"); ObjectHelper.requireNonNull(source2, "source2 is null"); return mergeArray(source1, source2); } /** * Flattens three MaybeSources into a single Flowable, without any transformation. 
* <p> * <img width="640" height="483" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Maybe.merge.png" alt=""> * <p> * You can combine items emitted by multiple MaybeSources so that they appear as a single Flowable, by using * the {@code merge} method. * <dl> * <dt><b>Backpressure:</b></dt> * <dd>The operator honors backpressure from downstream.</dd> * <dt><b>Scheduler:</b></dt> * <dd>{@code merge} does not operate by default on a particular {@link Scheduler}.</dd> * <dt><b>Error handling:</b></dt> * <dd>If any of the source {@code MaybeSource}s signal a {@code Throwable} via {@code onError}, the resulting * {@code Flowable} terminates with that {@code Throwable} and all other source {@code MaybeSource}s are disposed. * If more than one {@code MaybeSource} signals an error, the resulting {@code Flowable} may terminate with the * first one's error or, depending on the concurrency of the sources, may terminate with a * {@code CompositeException} containing two or more of the various error signals. * {@code Throwable}s that didn't make into the composite will be sent (individually) to the global error handler via * {@link RxJavaPlugins#onError(Throwable)} method as {@code UndeliverableException} errors. Similarly, {@code Throwable}s * signaled by source(s) after the returned {@code Flowable} has been cancelled or terminated with a * (composite) error will be sent to the same global error handler. * Use {@link #mergeDelayError(MaybeSource, MaybeSource, MaybeSource)} to merge sources and terminate only when all source {@code MaybeSource}s * have completed or failed with an error. 
* </dd> * </dl> * * @param <T> the common value type * @param source1 * a MaybeSource to be merged * @param source2 * a MaybeSource to be merged * @param source3 * a MaybeSource to be merged * @return a Flowable that emits all of the items emitted by the source MaybeSources * @see <a href="http://reactivex.io/documentation/operators/merge.html">ReactiveX operators documentation: Merge</a> * @see #mergeDelayError(MaybeSource, MaybeSource, MaybeSource) */ @BackpressureSupport(BackpressureKind.FULL) @CheckReturnValue @NonNull @SchedulerSupport(SchedulerSupport.NONE) @SuppressWarnings("unchecked") public static <T> Flowable<T> merge( MaybeSource<? extends T> source1, MaybeSource<? extends T> source2, MaybeSource<? extends T> source3 ) { ObjectHelper.requireNonNull(source1, "source1 is null"); ObjectHelper.requireNonNull(source2, "source2 is null"); ObjectHelper.requireNonNull(source3, "source3 is null"); return mergeArray(source1, source2, source3); } /** * Flattens four MaybeSources into a single Flowable, without any transformation. * <p> * <img width="640" height="483" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Maybe.merge.png" alt=""> * <p> * You can combine items emitted by multiple MaybeSources so that they appear as a single Flowable, by using * the {@code merge} method. * <dl> * <dt><b>Backpressure:</b></dt> * <dd>The operator honors backpressure from downstream.</dd> * <dt><b>Scheduler:</b></dt> * <dd>{@code merge} does not operate by default on a particular {@link Scheduler}.</dd> * <dt><b>Error handling:</b></dt> * <dd>If any of the source {@code MaybeSource}s signal a {@code Throwable} via {@code onError}, the resulting * {@code Flowable} terminates with that {@code Throwable} and all other source {@code MaybeSource}s are disposed. 
* If more than one {@code MaybeSource} signals an error, the resulting {@code Flowable} may terminate with the * first one's error or, depending on the concurrency of the sources, may terminate with a * {@code CompositeException} containing two or more of the various error signals. * {@code Throwable}s that didn't make into the composite will be sent (individually) to the global error handler via * {@link RxJavaPlugins#onError(Throwable)} method as {@code UndeliverableException} errors. Similarly, {@code Throwable}s * signaled by source(s) after the returned {@code Flowable} has been cancelled or terminated with a * (composite) error will be sent to the same global error handler. * Use {@link #mergeDelayError(MaybeSource, MaybeSource, MaybeSource, MaybeSource)} to merge sources and terminate only when all source {@code MaybeSource}s * have completed or failed with an error. * </dd> * </dl> * * @param <T> the common value type * @param source1 * a MaybeSource to be merged * @param source2 * a MaybeSource to be merged * @param source3 * a MaybeSource to be merged * @param source4 * a MaybeSource to be merged * @return a Flowable that emits all of the items emitted by the source MaybeSources * @see <a href="http://reactivex.io/documentation/operators/merge.html">ReactiveX operators documentation: Merge</a> * @see #mergeDelayError(MaybeSource, MaybeSource, MaybeSource, MaybeSource) */ @BackpressureSupport(BackpressureKind.FULL) @CheckReturnValue @NonNull @SchedulerSupport(SchedulerSupport.NONE) @SuppressWarnings("unchecked") public static <T> Flowable<T> merge( MaybeSource<? extends T> source1, MaybeSource<? extends T> source2, MaybeSource<? extends T> source3, MaybeSource<? 
extends T> source4 ) { ObjectHelper.requireNonNull(source1, "source1 is null"); ObjectHelper.requireNonNull(source2, "source2 is null"); ObjectHelper.requireNonNull(source3, "source3 is null"); ObjectHelper.requireNonNull(source4, "source4 is null"); return mergeArray(source1, source2, source3, source4); } /** * Merges an array sequence of MaybeSource instances into a single Flowable sequence, * running all MaybeSources at once. * <dl> * <dt><b>Backpressure:</b></dt> * <dd>The operator honors backpressure from downstream.</dd> * <dt><b>Scheduler:</b></dt> * <dd>{@code mergeArray} does not operate by default on a particular {@link Scheduler}.</dd> * <dt><b>Error handling:</b></dt> * <dd>If any of the source {@code MaybeSource}s signal a {@code Throwable} via {@code onError}, the resulting * {@code Flowable} terminates with that {@code Throwable} and all other source {@code MaybeSource}s are disposed. * If more than one {@code MaybeSource} signals an error, the resulting {@code Flowable} may terminate with the * first one's error or, depending on the concurrency of the sources, may terminate with a * {@code CompositeException} containing two or more of the various error signals. * {@code Throwable}s that didn't make into the composite will be sent (individually) to the global error handler via * {@link RxJavaPlugins#onError(Throwable)} method as {@code UndeliverableException} errors. Similarly, {@code Throwable}s * signaled by source(s) after the returned {@code Flowable} has been cancelled or terminated with a * (composite) error will be sent to the same global error handler. * Use {@link #mergeArrayDelayError(MaybeSource...)} to merge sources and terminate only when all source {@code MaybeSource}s * have completed or failed with an error. * </dd> * </dl> * @param <T> the common and resulting value type * @param sources the array sequence of MaybeSource sources * @return the new Flowable instance * @see #mergeArrayDelayError(MaybeSource...) 
*/ @BackpressureSupport(BackpressureKind.FULL) @CheckReturnValue @NonNull @SchedulerSupport(SchedulerSupport.NONE) @SuppressWarnings("unchecked") public static <T> Flowable<T> mergeArray(MaybeSource<? extends T>... sources) { ObjectHelper.requireNonNull(sources, "sources is null"); if (sources.length == 0) { return Flowable.empty(); } if (sources.length == 1) { return RxJavaPlugins.onAssembly(new MaybeToFlowable<T>((MaybeSource<T>)sources[0])); } return RxJavaPlugins.onAssembly(new MaybeMergeArray<T>(sources)); } /** * Flattens an array of MaybeSources into one Flowable, in a way that allows a Subscriber to receive all * successfully emitted items from each of the source MaybeSources without being interrupted by an error * notification from one of them. * <p> * This behaves like {@link #merge(Publisher)} except that if any of the merged MaybeSources notify of an * error via {@link Subscriber#onError onError}, {@code mergeDelayError} will refrain from propagating that * error notification until all of the merged MaybeSources have finished emitting items. * <p> * <img width="640" height="380" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/mergeDelayError.png" alt=""> * <p> * Even if multiple merged MaybeSources send {@code onError} notifications, {@code mergeDelayError} will only * invoke the {@code onError} method of its Subscribers once. 
* <dl> * <dt><b>Backpressure:</b></dt> * <dd>The operator honors backpressure from downstream.</dd> * <dt><b>Scheduler:</b></dt> * <dd>{@code mergeArrayDelayError} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * * @param <T> the common element base type * @param sources * the Iterable of MaybeSources * @return a Flowable that emits items that are the result of flattening the items emitted by the * MaybeSources in the Iterable * @see <a href="http://reactivex.io/documentation/operators/merge.html">ReactiveX operators documentation: Merge</a> */ @SuppressWarnings({ "unchecked", "rawtypes" }) @BackpressureSupport(BackpressureKind.FULL) @CheckReturnValue @SchedulerSupport(SchedulerSupport.NONE) public static <T> Flowable<T> mergeArrayDelayError(MaybeSource<? extends T>... sources) { if (sources.length == 0) { return Flowable.empty(); } return Flowable.fromArray(sources).flatMap((Function)MaybeToPublisher.instance(), true, sources.length); } /** * Flattens an Iterable of MaybeSources into one Flowable, in a way that allows a Subscriber to receive all * successfully emitted items from each of the source MaybeSources without being interrupted by an error * notification from one of them. * <p> * This behaves like {@link #merge(Publisher)} except that if any of the merged MaybeSources notify of an * error via {@link Subscriber#onError onError}, {@code mergeDelayError} will refrain from propagating that * error notification until all of the merged MaybeSources have finished emitting items. * <p> * <img width="640" height="380" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/mergeDelayError.png" alt=""> * <p> * Even if multiple merged MaybeSources send {@code onError} notifications, {@code mergeDelayError} will only * invoke the {@code onError} method of its Subscribers once. 
* <dl> * <dt><b>Backpressure:</b></dt> * <dd>The operator honors backpressure from downstream.</dd> * <dt><b>Scheduler:</b></dt> * <dd>{@code mergeDelayError} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * * @param <T> the common element base type * @param sources * the Iterable of MaybeSources * @return a Flowable that emits items that are the result of flattening the items emitted by the * MaybeSources in the Iterable * @see <a href="http://reactivex.io/documentation/operators/merge.html">ReactiveX operators documentation: Merge</a> */ @SuppressWarnings({ "unchecked", "rawtypes" }) @BackpressureSupport(BackpressureKind.FULL) @CheckReturnValue @SchedulerSupport(SchedulerSupport.NONE) public static <T> Flowable<T> mergeDelayError(Iterable<? extends MaybeSource<? extends T>> sources) { return Flowable.fromIterable(sources).flatMap((Function)MaybeToPublisher.instance(), true); } /** * Flattens a Publisher that emits MaybeSources into one Publisher, in a way that allows a Subscriber to * receive all successfully emitted items from all of the source MaybeSources without being interrupted by * an error notification from one of them or even the main Publisher. * <p> * This behaves like {@link #merge(Publisher)} except that if any of the merged MaybeSources notify of an * error via {@link Subscriber#onError onError}, {@code mergeDelayError} will refrain from propagating that * error notification until all of the merged MaybeSources and the main Publisher have finished emitting items. * <p> * <img width="640" height="380" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/mergeDelayError.png" alt=""> * <p> * Even if multiple merged Publishers send {@code onError} notifications, {@code mergeDelayError} will only * invoke the {@code onError} method of its Subscribers once. * <dl> * <dt><b>Backpressure:</b></dt> * <dd>The operator honors backpressure from downstream. 
The outer {@code Publisher} is consumed * in unbounded mode (i.e., no backpressure is applied to it).</dd> * <dt><b>Scheduler:</b></dt> * <dd>{@code mergeDelayError} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * * @param <T> the common element base type * @param sources * a Publisher that emits MaybeSources * @return a Flowable that emits all of the items emitted by the Publishers emitted by the * {@code source} Publisher * @see <a href="http://reactivex.io/documentation/operators/merge.html">ReactiveX operators documentation: Merge</a> */ @BackpressureSupport(BackpressureKind.FULL) @CheckReturnValue @SchedulerSupport(SchedulerSupport.NONE) public static <T> Flowable<T> mergeDelayError(Publisher<? extends MaybeSource<? extends T>> sources) { return mergeDelayError(sources, Integer.MAX_VALUE); } /** * Flattens a Publisher that emits MaybeSources into one Publisher, in a way that allows a Subscriber to * receive all successfully emitted items from all of the source MaybeSources without being interrupted by * an error notification from one of them or even the main Publisher as well as limiting the total number of active MaybeSources. * <p> * This behaves like {@link #merge(Publisher, int)} except that if any of the merged MaybeSources notify of an * error via {@link Subscriber#onError onError}, {@code mergeDelayError} will refrain from propagating that * error notification until all of the merged MaybeSources and the main Publisher have finished emitting items. * <p> * <img width="640" height="380" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/mergeDelayError.png" alt=""> * <p> * Even if multiple merged Publishers send {@code onError} notifications, {@code mergeDelayError} will only * invoke the {@code onError} method of its Subscribers once. * <dl> * <dt><b>Backpressure:</b></dt> * <dd>The operator honors backpressure from downstream. 
The outer {@code Publisher} is consumed * in unbounded mode (i.e., no backpressure is applied to it).</dd> * <dt><b>Scheduler:</b></dt> * <dd>{@code mergeDelayError} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * <p>History: 2.1.9 - experimental * @param <T> the common element base type * @param sources * a Publisher that emits MaybeSources * @param maxConcurrency the maximum number of active inner MaybeSources to be merged at a time * @return a Flowable that emits all of the items emitted by the Publishers emitted by the * {@code source} Publisher * @see <a href="http://reactivex.io/documentation/operators/merge.html">ReactiveX operators documentation: Merge</a> * @since 2.2 */ @SuppressWarnings({ "unchecked", "rawtypes" }) @BackpressureSupport(BackpressureKind.FULL) @CheckReturnValue @NonNull @SchedulerSupport(SchedulerSupport.NONE) public static <T> Flowable<T> mergeDelayError(Publisher<? extends MaybeSource<? extends T>> sources, int maxConcurrency) { ObjectHelper.requireNonNull(sources, "source is null"); ObjectHelper.verifyPositive(maxConcurrency, "maxConcurrency"); return RxJavaPlugins.onAssembly(new FlowableFlatMapPublisher(sources, MaybeToPublisher.instance(), true, maxConcurrency, 1)); } /** * Flattens two MaybeSources into one Flowable, in a way that allows a Subscriber to receive all * successfully emitted items from each of the source MaybeSources without being interrupted by an error * notification from one of them. * <p> * This behaves like {@link #merge(MaybeSource, MaybeSource)} except that if any of the merged MaybeSources * notify of an error via {@link Subscriber#onError onError}, {@code mergeDelayError} will refrain from * propagating that error notification until all of the merged MaybeSources have finished emitting items. 
* <p> * <img width="640" height="380" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/mergeDelayError.png" alt=""> * <p> * Even if both merged MaybeSources send {@code onError} notifications, {@code mergeDelayError} will only * invoke the {@code onError} method of its Subscribers once. * <dl> * <dt><b>Backpressure:</b></dt> * <dd>The operator honors backpressure from downstream.</dd> * <dt><b>Scheduler:</b></dt> * <dd>{@code mergeDelayError} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * * @param <T> the common element base type * @param source1 * a MaybeSource to be merged * @param source2 * a MaybeSource to be merged * @return a Flowable that emits all of the items that are emitted by the two source MaybeSources * @see <a href="http://reactivex.io/documentation/operators/merge.html">ReactiveX operators documentation: Merge</a> */ @SuppressWarnings({ "unchecked" }) @BackpressureSupport(BackpressureKind.FULL) @CheckReturnValue @NonNull @SchedulerSupport(SchedulerSupport.NONE) public static <T> Flowable<T> mergeDelayError(MaybeSource<? extends T> source1, MaybeSource<? extends T> source2) { ObjectHelper.requireNonNull(source1, "source1 is null"); ObjectHelper.requireNonNull(source2, "source2 is null"); return mergeArrayDelayError(source1, source2); } /** * Flattens three MaybeSource into one Flowable, in a way that allows a Subscriber to receive all * successfully emitted items from all of the source MaybeSources without being interrupted by an error * notification from one of them. * <p> * This behaves like {@link #merge(MaybeSource, MaybeSource, MaybeSource)} except that if any of the merged * MaybeSources notify of an error via {@link Subscriber#onError onError}, {@code mergeDelayError} will refrain * from propagating that error notification until all of the merged MaybeSources have finished emitting * items. 
* <p> * <img width="640" height="380" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/mergeDelayError.png" alt=""> * <p> * Even if multiple merged MaybeSources send {@code onError} notifications, {@code mergeDelayError} will only * invoke the {@code onError} method of its Subscribers once. * <dl> * <dt><b>Backpressure:</b></dt> * <dd>The operator honors backpressure from downstream.</dd> * <dt><b>Scheduler:</b></dt> * <dd>{@code mergeDelayError} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * * @param <T> the common element base type * @param source1 * a MaybeSource to be merged * @param source2 * a MaybeSource to be merged * @param source3 * a MaybeSource to be merged * @return a Flowable that emits all of the items that are emitted by the source MaybeSources * @see <a href="http://reactivex.io/documentation/operators/merge.html">ReactiveX operators documentation: Merge</a> */ @SuppressWarnings({ "unchecked" }) @BackpressureSupport(BackpressureKind.FULL) @CheckReturnValue @NonNull @SchedulerSupport(SchedulerSupport.NONE) public static <T> Flowable<T> mergeDelayError(MaybeSource<? extends T> source1, MaybeSource<? extends T> source2, MaybeSource<? extends T> source3) { ObjectHelper.requireNonNull(source1, "source1 is null"); ObjectHelper.requireNonNull(source2, "source2 is null"); ObjectHelper.requireNonNull(source3, "source3 is null"); return mergeArrayDelayError(source1, source2, source3); } /** * Flattens four MaybeSources into one Flowable, in a way that allows a Subscriber to receive all * successfully emitted items from all of the source MaybeSources without being interrupted by an error * notification from one of them. 
* <p> * This behaves like {@link #merge(MaybeSource, MaybeSource, MaybeSource, MaybeSource)} except that if any of * the merged MaybeSources notify of an error via {@link Subscriber#onError onError}, {@code mergeDelayError} * will refrain from propagating that error notification until all of the merged MaybeSources have finished * emitting items. * <p> * <img width="640" height="380" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/mergeDelayError.png" alt=""> * <p> * Even if multiple merged MaybeSources send {@code onError} notifications, {@code mergeDelayError} will only * invoke the {@code onError} method of its Subscribers once. * <dl> * <dt><b>Backpressure:</b></dt> * <dd>The operator honors backpressure from downstream.</dd> * <dt><b>Scheduler:</b></dt> * <dd>{@code mergeDelayError} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * * @param <T> the common element base type * @param source1 * a MaybeSource to be merged * @param source2 * a MaybeSource to be merged * @param source3 * a MaybeSource to be merged * @param source4 * a MaybeSource to be merged * @return a Flowable that emits all of the items that are emitted by the source MaybeSources * @see <a href="http://reactivex.io/documentation/operators/merge.html">ReactiveX operators documentation: Merge</a> */ @SuppressWarnings({ "unchecked" }) @BackpressureSupport(BackpressureKind.FULL) @CheckReturnValue @NonNull @SchedulerSupport(SchedulerSupport.NONE) public static <T> Flowable<T> mergeDelayError( MaybeSource<? extends T> source1, MaybeSource<? extends T> source2, MaybeSource<? extends T> source3, MaybeSource<? 
extends T> source4) { ObjectHelper.requireNonNull(source1, "source1 is null"); ObjectHelper.requireNonNull(source2, "source2 is null"); ObjectHelper.requireNonNull(source3, "source3 is null"); ObjectHelper.requireNonNull(source4, "source4 is null"); return mergeArrayDelayError(source1, source2, source3, source4); } /** * Returns a Maybe that never sends any items or notifications to a {@link MaybeObserver}. * <p> * <img width="640" height="185" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/never.png" alt=""> * <p> * This Maybe is useful primarily for testing purposes. * <dl> * <dt><b>Scheduler:</b></dt> * <dd>{@code never} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * * @param <T> * the type of items (not) emitted by the Maybe * @return a Maybe that never emits any items or sends any notifications to a {@link MaybeObserver} * @see <a href="http://reactivex.io/documentation/operators/empty-never-throw.html">ReactiveX operators documentation: Never</a> */ @CheckReturnValue @SchedulerSupport(SchedulerSupport.NONE) @SuppressWarnings("unchecked") public static <T> Maybe<T> never() { return RxJavaPlugins.onAssembly((Maybe<T>)MaybeNever.INSTANCE); } /** * Returns a Single that emits a Boolean value that indicates whether two MaybeSource sequences are the * same by comparing the items emitted by each MaybeSource pairwise. 
* <p> * <img width="640" height="385" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/sequenceEqual.png" alt=""> * <dl> * <dt><b>Scheduler:</b></dt> * <dd>{@code sequenceEqual} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * * @param source1 * the first MaybeSource to compare * @param source2 * the second MaybeSource to compare * @param <T> * the type of items emitted by each MaybeSource * @return a Single that emits a Boolean value that indicates whether the two sequences are the same * @see <a href="http://reactivex.io/documentation/operators/sequenceequal.html">ReactiveX operators documentation: SequenceEqual</a> */ @CheckReturnValue @SchedulerSupport(SchedulerSupport.NONE) public static <T> Single<Boolean> sequenceEqual(MaybeSource<? extends T> source1, MaybeSource<? extends T> source2) { return sequenceEqual(source1, source2, ObjectHelper.equalsPredicate()); } /** * Returns a Single that emits a Boolean value that indicates whether two MaybeSources are the * same by comparing the items emitted by each MaybeSource pairwise based on the results of a specified * equality function. 
* <p> * <img width="640" height="385" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/sequenceEqual.png" alt=""> * <dl> * <dt><b>Scheduler:</b></dt> * <dd>{@code sequenceEqual} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * * @param source1 * the first MaybeSource to compare * @param source2 * the second MaybeSource to compare * @param isEqual * a function used to compare items emitted by each MaybeSource * @param <T> * the type of items emitted by each MaybeSource * @return a Single that emits a Boolean value that indicates whether the two MaybeSource sequences * are the same according to the specified function * @see <a href="http://reactivex.io/documentation/operators/sequenceequal.html">ReactiveX operators documentation: SequenceEqual</a> */ @CheckReturnValue @NonNull @SchedulerSupport(SchedulerSupport.NONE) public static <T> Single<Boolean> sequenceEqual(MaybeSource<? extends T> source1, MaybeSource<? extends T> source2, BiPredicate<? super T, ? super T> isEqual) { ObjectHelper.requireNonNull(source1, "source1 is null"); ObjectHelper.requireNonNull(source2, "source2 is null"); ObjectHelper.requireNonNull(isEqual, "isEqual is null"); return RxJavaPlugins.onAssembly(new MaybeEqualSingle<T>(source1, source2, isEqual)); } /** * Returns a Maybe that emits {@code 0L} after a specified delay. 
* <p> * <img width="640" height="200" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/timer.png" alt=""> * <dl> * <dt><b>Scheduler:</b></dt> * <dd>{@code timer} operates by default on the {@code computation} {@link Scheduler}.</dd> * </dl> * * @param delay * the initial delay before emitting a single {@code 0L} * @param unit * time units to use for {@code delay} * @return a Maybe that emits {@code 0L} after a specified delay * @see <a href="http://reactivex.io/documentation/operators/timer.html">ReactiveX operators documentation: Timer</a> */ @CheckReturnValue @SchedulerSupport(SchedulerSupport.COMPUTATION) public static Maybe<Long> timer(long delay, TimeUnit unit) { return timer(delay, unit, Schedulers.computation()); } /** * Returns a Maybe that emits {@code 0L} after a specified delay on a specified Scheduler. * <p> * <img width="640" height="200" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/timer.s.png" alt=""> * <dl> * <dt><b>Scheduler:</b></dt> * <dd>You specify which {@link Scheduler} this operator will use.</dd> * </dl> * * @param delay * the initial delay before emitting a single 0L * @param unit * time units to use for {@code delay} * @param scheduler * the {@link Scheduler} to use for scheduling the item * @return a Maybe that emits {@code 0L} after a specified delay, on a specified Scheduler * @see <a href="http://reactivex.io/documentation/operators/timer.html">ReactiveX operators documentation: Timer</a> */ @CheckReturnValue @NonNull @SchedulerSupport(SchedulerSupport.CUSTOM) public static Maybe<Long> timer(long delay, TimeUnit unit, Scheduler scheduler) { ObjectHelper.requireNonNull(unit, "unit is null"); ObjectHelper.requireNonNull(scheduler, "scheduler is null"); return RxJavaPlugins.onAssembly(new MaybeTimer(Math.max(0L, delay), unit, scheduler)); } /** * <strong>Advanced use only:</strong> creates a Maybe instance without * any safeguards by using a callback that is called with a MaybeObserver. 
* <dl> * <dt><b>Scheduler:</b></dt> * <dd>{@code unsafeCreate} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * @param <T> the value type * @param onSubscribe the function that is called with the subscribing MaybeObserver * @return the new Maybe instance */ @CheckReturnValue @NonNull @SchedulerSupport(SchedulerSupport.NONE) public static <T> Maybe<T> unsafeCreate(MaybeSource<T> onSubscribe) { if (onSubscribe instanceof Maybe) { throw new IllegalArgumentException("unsafeCreate(Maybe) should be upgraded"); } ObjectHelper.requireNonNull(onSubscribe, "onSubscribe is null"); return RxJavaPlugins.onAssembly(new MaybeUnsafeCreate<T>(onSubscribe)); } /** * Constructs a Maybe that creates a dependent resource object which is disposed of when the * upstream terminates or the downstream calls dispose(). * <p> * <img width="640" height="400" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/using.png" alt=""> * <dl> * <dt><b>Scheduler:</b></dt> * <dd>{@code using} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * * @param <T> the element type of the generated MaybeSource * @param <D> the type of the resource associated with the output sequence * @param resourceSupplier * the factory function to create a resource object that depends on the Maybe * @param sourceSupplier * the factory function to create a MaybeSource * @param resourceDisposer * the function that will dispose of the resource * @return the Maybe whose lifetime controls the lifetime of the dependent resource object * @see <a href="http://reactivex.io/documentation/operators/using.html">ReactiveX operators documentation: Using</a> */ @CheckReturnValue @SchedulerSupport(SchedulerSupport.NONE) public static <T, D> Maybe<T> using(Callable<? extends D> resourceSupplier, Function<? super D, ? extends MaybeSource<? extends T>> sourceSupplier, Consumer<? 
super D> resourceDisposer) { return using(resourceSupplier, sourceSupplier, resourceDisposer, true); } /** * Constructs a Maybe that creates a dependent resource object which is disposed of just before * termination if you have set {@code disposeEagerly} to {@code true} and a downstream dispose() does not occur * before termination. Otherwise resource disposal will occur on call to dispose(). Eager disposal is * particularly appropriate for a synchronous Maybe that reuses resources. {@code disposeAction} will * only be called once per subscription. * <p> * <img width="640" height="400" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/using.png" alt=""> * <dl> * <dt><b>Scheduler:</b></dt> * <dd>{@code using} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * * @param <T> the element type of the generated MaybeSource * @param <D> the type of the resource associated with the output sequence * @param resourceSupplier * the factory function to create a resource object that depends on the Maybe * @param sourceSupplier * the factory function to create a MaybeSource * @param resourceDisposer * the function that will dispose of the resource * @param eager * if {@code true} then disposal will happen either on a dispose() call or just before emission of * a terminal event ({@code onComplete} or {@code onError}). * @return the Maybe whose lifetime controls the lifetime of the dependent resource object * @see <a href="http://reactivex.io/documentation/operators/using.html">ReactiveX operators documentation: Using</a> */ @CheckReturnValue @NonNull @SchedulerSupport(SchedulerSupport.NONE) public static <T, D> Maybe<T> using(Callable<? extends D> resourceSupplier, Function<? super D, ? extends MaybeSource<? extends T>> sourceSupplier, Consumer<? 
super D> resourceDisposer, boolean eager) { ObjectHelper.requireNonNull(resourceSupplier, "resourceSupplier is null"); ObjectHelper.requireNonNull(sourceSupplier, "sourceSupplier is null"); ObjectHelper.requireNonNull(resourceDisposer, "disposer is null"); return RxJavaPlugins.onAssembly(new MaybeUsing<T, D>(resourceSupplier, sourceSupplier, resourceDisposer, eager)); } /** * Wraps a MaybeSource instance into a new Maybe instance if not already a Maybe * instance. * <dl> * <dt><b>Scheduler:</b></dt> * <dd>{@code wrap} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * @param <T> the value type * @param source the source to wrap * @return the Maybe wrapper or the source cast to Maybe (if possible) */ @CheckReturnValue @NonNull @SchedulerSupport(SchedulerSupport.NONE) public static <T> Maybe<T> wrap(MaybeSource<T> source) { if (source instanceof Maybe) { return RxJavaPlugins.onAssembly((Maybe<T>)source); } ObjectHelper.requireNonNull(source, "onSubscribe is null"); return RxJavaPlugins.onAssembly(new MaybeUnsafeCreate<T>(source)); } /** * Returns a Maybe that emits the results of a specified combiner function applied to combinations of * items emitted, in sequence, by an Iterable of other MaybeSources. * <p> * Note on method signature: since Java doesn't allow creating a generic array with {@code new T[]}, the * implementation of this operator has to create an {@code Object[]} instead. Unfortunately, a * {@code Function<Integer[], R>} passed to the method would trigger a {@code ClassCastException}. * * <p> * <img width="640" height="380" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/zip.png" alt=""> * <p>This operator terminates eagerly if any of the source MaybeSources signal an onError or onComplete. This * also means it is possible some sources may not get subscribed to at all. 
* <dl> * <dt><b>Scheduler:</b></dt> * <dd>{@code zip} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * * @param <T> the common value type * @param <R> the zipped result type * @param sources * an Iterable of source MaybeSources * @param zipper * a function that, when applied to an item emitted by each of the source MaybeSources, results in * an item that will be emitted by the resulting Maybe * @return a Maybe that emits the zipped results * @see <a href="http://reactivex.io/documentation/operators/zip.html">ReactiveX operators documentation: Zip</a> */ @CheckReturnValue @NonNull @SchedulerSupport(SchedulerSupport.NONE) public static <T, R> Maybe<R> zip(Iterable<? extends MaybeSource<? extends T>> sources, Function<? super Object[], ? extends R> zipper) { ObjectHelper.requireNonNull(zipper, "zipper is null"); ObjectHelper.requireNonNull(sources, "sources is null"); return RxJavaPlugins.onAssembly(new MaybeZipIterable<T, R>(sources, zipper)); } /** * Returns a Maybe that emits the results of a specified combiner function applied to combinations of * two items emitted, in sequence, by two other MaybeSources. * <p> * <img width="640" height="380" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/zip.png" alt=""> * <p>This operator terminates eagerly if any of the source MaybeSources signal an onError or onComplete. This * also means it is possible some sources may not get subscribed to at all. 
* <dl> * <dt><b>Scheduler:</b></dt> * <dd>{@code zip} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * * @param <T1> the value type of the first source * @param <T2> the value type of the second source * @param <R> the zipped result type * @param source1 * the first source MaybeSource * @param source2 * a second source MaybeSource * @param zipper * a function that, when applied to an item emitted by each of the source MaybeSources, results * in an item that will be emitted by the resulting Maybe * @return a Maybe that emits the zipped results * @see <a href="http://reactivex.io/documentation/operators/zip.html">ReactiveX operators documentation: Zip</a> */ @SuppressWarnings("unchecked") @CheckReturnValue @NonNull @SchedulerSupport(SchedulerSupport.NONE) public static <T1, T2, R> Maybe<R> zip( MaybeSource<? extends T1> source1, MaybeSource<? extends T2> source2, BiFunction<? super T1, ? super T2, ? extends R> zipper) { ObjectHelper.requireNonNull(source1, "source1 is null"); ObjectHelper.requireNonNull(source2, "source2 is null"); return zipArray(Functions.toFunction(zipper), source1, source2); } /** * Returns a Maybe that emits the results of a specified combiner function applied to combinations of * three items emitted, in sequence, by three other MaybeSources. * <p> * <img width="640" height="380" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/zip.png" alt=""> * <p>This operator terminates eagerly if any of the source MaybeSources signal an onError or onComplete. This * also means it is possible some sources may not get subscribed to at all. 
* <dl> * <dt><b>Scheduler:</b></dt> * <dd>{@code zip} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * * @param <T1> the value type of the first source * @param <T2> the value type of the second source * @param <T3> the value type of the third source * @param <R> the zipped result type * @param source1 * the first source MaybeSource * @param source2 * a second source MaybeSource * @param source3 * a third source MaybeSource * @param zipper * a function that, when applied to an item emitted by each of the source MaybeSources, results in * an item that will be emitted by the resulting Maybe * @return a Maybe that emits the zipped results * @see <a href="http://reactivex.io/documentation/operators/zip.html">ReactiveX operators documentation: Zip</a> */ @SuppressWarnings("unchecked") @CheckReturnValue @NonNull @SchedulerSupport(SchedulerSupport.NONE) public static <T1, T2, T3, R> Maybe<R> zip( MaybeSource<? extends T1> source1, MaybeSource<? extends T2> source2, MaybeSource<? extends T3> source3, Function3<? super T1, ? super T2, ? super T3, ? extends R> zipper) { ObjectHelper.requireNonNull(source1, "source1 is null"); ObjectHelper.requireNonNull(source2, "source2 is null"); ObjectHelper.requireNonNull(source3, "source3 is null"); return zipArray(Functions.toFunction(zipper), source1, source2, source3); } /** * Returns a Maybe that emits the results of a specified combiner function applied to combinations of * four items emitted, in sequence, by four other MaybeSources. * <p> * <img width="640" height="380" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/zip.png" alt=""> * <p>This operator terminates eagerly if any of the source MaybeSources signal an onError or onComplete. This * also means it is possible some sources may not get subscribed to at all. 
* <dl> * <dt><b>Scheduler:</b></dt> * <dd>{@code zip} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * * @param <T1> the value type of the first source * @param <T2> the value type of the second source * @param <T3> the value type of the third source * @param <T4> the value type of the fourth source * @param <R> the zipped result type * @param source1 * the first source MaybeSource * @param source2 * a second source MaybeSource * @param source3 * a third source MaybeSource * @param source4 * a fourth source MaybeSource * @param zipper * a function that, when applied to an item emitted by each of the source MaybeSources, results in * an item that will be emitted by the resulting Maybe * @return a Maybe that emits the zipped results * @see <a href="http://reactivex.io/documentation/operators/zip.html">ReactiveX operators documentation: Zip</a> */ @SuppressWarnings("unchecked") @CheckReturnValue @NonNull @SchedulerSupport(SchedulerSupport.NONE) public static <T1, T2, T3, T4, R> Maybe<R> zip( MaybeSource<? extends T1> source1, MaybeSource<? extends T2> source2, MaybeSource<? extends T3> source3, MaybeSource<? extends T4> source4, Function4<? super T1, ? super T2, ? super T3, ? super T4, ? extends R> zipper) { ObjectHelper.requireNonNull(source1, "source1 is null"); ObjectHelper.requireNonNull(source2, "source2 is null"); ObjectHelper.requireNonNull(source3, "source3 is null"); ObjectHelper.requireNonNull(source4, "source4 is null"); return zipArray(Functions.toFunction(zipper), source1, source2, source3, source4); } /** * Returns a Maybe that emits the results of a specified combiner function applied to combinations of * five items emitted, in sequence, by five other MaybeSources. * <p> * <img width="640" height="380" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/zip.png" alt=""> * <p>This operator terminates eagerly if any of the source MaybeSources signal an onError or onComplete. 
This
     * also means it is possible some sources may not get subscribed to at all.
     * <dl>
     *  <dt><b>Scheduler:</b></dt>
     *  <dd>{@code zip} does not operate by default on a particular {@link Scheduler}.</dd>
     * </dl>
     *
     * @param <T1> the value type of the first source
     * @param <T2> the value type of the second source
     * @param <T3> the value type of the third source
     * @param <T4> the value type of the fourth source
     * @param <T5> the value type of the fifth source
     * @param <R> the zipped result type
     * @param source1
     *            the first source MaybeSource
     * @param source2
     *            a second source MaybeSource
     * @param source3
     *            a third source MaybeSource
     * @param source4
     *            a fourth source MaybeSource
     * @param source5
     *            a fifth source MaybeSource
     * @param zipper
     *            a function that, when applied to an item emitted by each of the source MaybeSources, results in
     *            an item that will be emitted by the resulting Maybe
     * @return a Maybe that emits the zipped results
     * @throws NullPointerException if {@code source1}, {@code source2}, {@code source3}, {@code source4}
     *            or {@code source5} is null
     * @see <a href="http://reactivex.io/documentation/operators/zip.html">ReactiveX operators documentation: Zip</a>
     */
    @SuppressWarnings("unchecked")
    @CheckReturnValue
    @NonNull
    @SchedulerSupport(SchedulerSupport.NONE)
    public static <T1, T2, T3, T4, T5, R> Maybe<R> zip(
            MaybeSource<? extends T1> source1, MaybeSource<? extends T2> source2, MaybeSource<? extends T3> source3,
            MaybeSource<? extends T4> source4, MaybeSource<? extends T5> source5,
            Function5<? super T1, ? super T2, ? super T3, ? super T4, ? super T5, ? extends R> zipper) {
        ObjectHelper.requireNonNull(source1, "source1 is null");
        ObjectHelper.requireNonNull(source2, "source2 is null");
        ObjectHelper.requireNonNull(source3, "source3 is null");
        ObjectHelper.requireNonNull(source4, "source4 is null");
        ObjectHelper.requireNonNull(source5, "source5 is null");
        return zipArray(Functions.toFunction(zipper), source1, source2, source3, source4, source5);
    }

    /**
     * Returns a Maybe that emits the results of a specified combiner function applied to combinations of
     * six items emitted, in sequence, by six other MaybeSources.
     * <p>
     * <img width="640" height="380" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/zip.png" alt="">
     * <p>This operator terminates eagerly if any of the source MaybeSources signal an onError or onComplete. This
     * also means it is possible some sources may not get subscribed to at all.
     * <dl>
     *  <dt><b>Scheduler:</b></dt>
     *  <dd>{@code zip} does not operate by default on a particular {@link Scheduler}.</dd>
     * </dl>
     *
     * @param <T1> the value type of the first source
     * @param <T2> the value type of the second source
     * @param <T3> the value type of the third source
     * @param <T4> the value type of the fourth source
     * @param <T5> the value type of the fifth source
     * @param <T6> the value type of the sixth source
     * @param <R> the zipped result type
     * @param source1
     *            the first source MaybeSource
     * @param source2
     *            a second source MaybeSource
     * @param source3
     *            a third source MaybeSource
     * @param source4
     *            a fourth source MaybeSource
     * @param source5
     *            a fifth source MaybeSource
     * @param source6
     *            a sixth source MaybeSource
     * @param zipper
     *            a function that, when applied to an item emitted by each of the source MaybeSources, results in
     *            an item that will be emitted by the resulting Maybe
     * @return a Maybe that emits the zipped results
     * @throws NullPointerException if {@code source1}, {@code source2}, {@code source3}, {@code source4},
     *            {@code source5} or {@code source6} is null
     * @see <a href="http://reactivex.io/documentation/operators/zip.html">ReactiveX operators documentation: Zip</a>
     */
    @SuppressWarnings("unchecked")
    @CheckReturnValue
    @NonNull
    @SchedulerSupport(SchedulerSupport.NONE)
    public static <T1, T2, T3, T4, T5, T6, R> Maybe<R> zip(
            MaybeSource<? extends T1> source1, MaybeSource<? extends T2> source2, MaybeSource<? extends T3> source3,
            MaybeSource<? extends T4> source4, MaybeSource<? extends T5> source5, MaybeSource<? extends T6> source6,
            Function6<? super T1, ? super T2, ? super T3, ? super T4, ? super T5, ? super T6, ? extends R> zipper) {
        ObjectHelper.requireNonNull(source1, "source1 is null");
        ObjectHelper.requireNonNull(source2, "source2 is null");
        ObjectHelper.requireNonNull(source3, "source3 is null");
        ObjectHelper.requireNonNull(source4, "source4 is null");
        ObjectHelper.requireNonNull(source5, "source5 is null");
        ObjectHelper.requireNonNull(source6, "source6 is null");
        return zipArray(Functions.toFunction(zipper), source1, source2, source3, source4, source5, source6);
    }

    /**
     * Returns a Maybe that emits the results of a specified combiner function applied to combinations of
     * seven items emitted, in sequence, by seven other MaybeSources.
     * <p>
     * <img width="640" height="380" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/zip.png" alt="">
     * <p>This operator terminates eagerly if any of the source MaybeSources signal an onError or onComplete. This
     * also means it is possible some sources may not get subscribed to at all.
* <dl>
     *  <dt><b>Scheduler:</b></dt>
     *  <dd>{@code zip} does not operate by default on a particular {@link Scheduler}.</dd>
     * </dl>
     *
     * @param <T1> the value type of the first source
     * @param <T2> the value type of the second source
     * @param <T3> the value type of the third source
     * @param <T4> the value type of the fourth source
     * @param <T5> the value type of the fifth source
     * @param <T6> the value type of the sixth source
     * @param <T7> the value type of the seventh source
     * @param <R> the zipped result type
     * @param source1
     *            the first source MaybeSource
     * @param source2
     *            a second source MaybeSource
     * @param source3
     *            a third source MaybeSource
     * @param source4
     *            a fourth source MaybeSource
     * @param source5
     *            a fifth source MaybeSource
     * @param source6
     *            a sixth source MaybeSource
     * @param source7
     *            a seventh source MaybeSource
     * @param zipper
     *            a function that, when applied to an item emitted by each of the source MaybeSources, results in
     *            an item that will be emitted by the resulting Maybe
     * @return a Maybe that emits the zipped results
     * @throws NullPointerException if {@code source1}, {@code source2}, {@code source3}, {@code source4},
     *            {@code source5}, {@code source6} or {@code source7} is null
     * @see <a href="http://reactivex.io/documentation/operators/zip.html">ReactiveX operators documentation: Zip</a>
     */
    @SuppressWarnings("unchecked")
    @CheckReturnValue
    @NonNull
    @SchedulerSupport(SchedulerSupport.NONE)
    public static <T1, T2, T3, T4, T5, T6, T7, R> Maybe<R> zip(
            MaybeSource<? extends T1> source1, MaybeSource<? extends T2> source2, MaybeSource<? extends T3> source3,
            MaybeSource<? extends T4> source4, MaybeSource<? extends T5> source5, MaybeSource<? extends T6> source6,
            MaybeSource<? extends T7> source7,
            Function7<? super T1, ? super T2, ? super T3, ? super T4, ? super T5, ? super T6, ? super T7, ? extends R> zipper) {
        ObjectHelper.requireNonNull(source1, "source1 is null");
        ObjectHelper.requireNonNull(source2, "source2 is null");
        ObjectHelper.requireNonNull(source3, "source3 is null");
        ObjectHelper.requireNonNull(source4, "source4 is null");
        ObjectHelper.requireNonNull(source5, "source5 is null");
        ObjectHelper.requireNonNull(source6, "source6 is null");
        ObjectHelper.requireNonNull(source7, "source7 is null");
        return zipArray(Functions.toFunction(zipper), source1, source2, source3, source4, source5, source6, source7);
    }

    /**
     * Returns a Maybe that emits the results of a specified combiner function applied to combinations of
     * eight items emitted, in sequence, by eight other MaybeSources.
     * <p>
     * <img width="640" height="380" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/zip.png" alt="">
     * <p>This operator terminates eagerly if any of the source MaybeSources signal an onError or onComplete. This
     * also means it is possible some sources may not get subscribed to at all.
* <dl>
     *  <dt><b>Scheduler:</b></dt>
     *  <dd>{@code zip} does not operate by default on a particular {@link Scheduler}.</dd>
     * </dl>
     *
     * @param <T1> the value type of the first source
     * @param <T2> the value type of the second source
     * @param <T3> the value type of the third source
     * @param <T4> the value type of the fourth source
     * @param <T5> the value type of the fifth source
     * @param <T6> the value type of the sixth source
     * @param <T7> the value type of the seventh source
     * @param <T8> the value type of the eighth source
     * @param <R> the zipped result type
     * @param source1
     *            the first source MaybeSource
     * @param source2
     *            a second source MaybeSource
     * @param source3
     *            a third source MaybeSource
     * @param source4
     *            a fourth source MaybeSource
     * @param source5
     *            a fifth source MaybeSource
     * @param source6
     *            a sixth source MaybeSource
     * @param source7
     *            a seventh source MaybeSource
     * @param source8
     *            an eighth source MaybeSource
     * @param zipper
     *            a function that, when applied to an item emitted by each of the source MaybeSources, results in
     *            an item that will be emitted by the resulting Maybe
     * @return a Maybe that emits the zipped results
     * @throws NullPointerException if {@code source1}, {@code source2}, {@code source3}, {@code source4},
     *            {@code source5}, {@code source6}, {@code source7} or {@code source8} is null
     * @see <a href="http://reactivex.io/documentation/operators/zip.html">ReactiveX operators documentation: Zip</a>
     */
    @SuppressWarnings("unchecked")
    @CheckReturnValue
    @NonNull
    @SchedulerSupport(SchedulerSupport.NONE)
    public static <T1, T2, T3, T4, T5, T6, T7, T8, R> Maybe<R> zip(
            MaybeSource<? extends T1> source1, MaybeSource<? extends T2> source2, MaybeSource<? extends T3> source3,
            MaybeSource<? extends T4> source4, MaybeSource<? extends T5> source5, MaybeSource<? extends T6> source6,
            MaybeSource<? extends T7> source7, MaybeSource<? extends T8> source8,
            Function8<? super T1, ? super T2, ? super T3, ? super T4, ? super T5, ? super T6, ? super T7, ? super T8, ? extends R> zipper) {
        ObjectHelper.requireNonNull(source1, "source1 is null");
        ObjectHelper.requireNonNull(source2, "source2 is null");
        ObjectHelper.requireNonNull(source3, "source3 is null");
        ObjectHelper.requireNonNull(source4, "source4 is null");
        ObjectHelper.requireNonNull(source5, "source5 is null");
        ObjectHelper.requireNonNull(source6, "source6 is null");
        ObjectHelper.requireNonNull(source7, "source7 is null");
        ObjectHelper.requireNonNull(source8, "source8 is null");
        return zipArray(Functions.toFunction(zipper), source1, source2, source3, source4, source5, source6, source7, source8);
    }

    /**
     * Returns a Maybe that emits the results of a specified combiner function applied to combinations of
     * nine items emitted, in sequence, by nine other MaybeSources.
     * <p>
     * <img width="640" height="380" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/zip.png" alt="">
     * <dl>
     *  <dt><b>Scheduler:</b></dt>
     *  <dd>{@code zip} does not operate by default on a particular {@link Scheduler}.</dd>
     * </dl>
     * <p>This operator terminates eagerly if any of the source MaybeSources signal an onError or onComplete. This
     * also means it is possible some sources may not get subscribed to at all.
*
     * @param <T1> the value type of the first source
     * @param <T2> the value type of the second source
     * @param <T3> the value type of the third source
     * @param <T4> the value type of the fourth source
     * @param <T5> the value type of the fifth source
     * @param <T6> the value type of the sixth source
     * @param <T7> the value type of the seventh source
     * @param <T8> the value type of the eighth source
     * @param <T9> the value type of the ninth source
     * @param <R> the zipped result type
     * @param source1
     *            the first source MaybeSource
     * @param source2
     *            a second source MaybeSource
     * @param source3
     *            a third source MaybeSource
     * @param source4
     *            a fourth source MaybeSource
     * @param source5
     *            a fifth source MaybeSource
     * @param source6
     *            a sixth source MaybeSource
     * @param source7
     *            a seventh source MaybeSource
     * @param source8
     *            an eighth source MaybeSource
     * @param source9
     *            a ninth source MaybeSource
     * @param zipper
     *            a function that, when applied to an item emitted by each of the source MaybeSources, results in
     *            an item that will be emitted by the resulting MaybeSource
     * @return a Maybe that emits the zipped results
     * @throws NullPointerException if {@code source1}, {@code source2}, {@code source3}, {@code source4},
     *            {@code source5}, {@code source6}, {@code source7}, {@code source8} or {@code source9} is null
     * @see <a href="http://reactivex.io/documentation/operators/zip.html">ReactiveX operators documentation: Zip</a>
     */
    @SuppressWarnings("unchecked")
    @CheckReturnValue
    @NonNull
    @SchedulerSupport(SchedulerSupport.NONE)
    public static <T1, T2, T3, T4, T5, T6, T7, T8, T9, R> Maybe<R> zip(
            MaybeSource<? extends T1> source1, MaybeSource<? extends T2> source2, MaybeSource<? extends T3> source3,
            MaybeSource<? extends T4> source4, MaybeSource<? extends T5> source5, MaybeSource<? extends T6> source6,
            MaybeSource<? extends T7> source7, MaybeSource<? extends T8> source8, MaybeSource<? extends T9> source9,
            Function9<? super T1, ? super T2, ? super T3, ? super T4, ? super T5, ? super T6, ? super T7, ? super T8, ? super T9, ? extends R> zipper) {
        ObjectHelper.requireNonNull(source1, "source1 is null");
        ObjectHelper.requireNonNull(source2, "source2 is null");
        ObjectHelper.requireNonNull(source3, "source3 is null");
        ObjectHelper.requireNonNull(source4, "source4 is null");
        ObjectHelper.requireNonNull(source5, "source5 is null");
        ObjectHelper.requireNonNull(source6, "source6 is null");
        ObjectHelper.requireNonNull(source7, "source7 is null");
        ObjectHelper.requireNonNull(source8, "source8 is null");
        ObjectHelper.requireNonNull(source9, "source9 is null");
        return zipArray(Functions.toFunction(zipper), source1, source2, source3, source4, source5, source6, source7, source8, source9);
    }

    /**
     * Returns a Maybe that emits the results of a specified combiner function applied to combinations of
     * items emitted, in sequence, by an array of other MaybeSources.
     * <p>
     * Note on method signature: since Java doesn't allow creating a generic array with {@code new T[]}, the
     * implementation of this operator has to create an {@code Object[]} instead. Unfortunately, a
     * {@code Function<Integer[], R>} passed to the method would trigger a {@code ClassCastException}.
     *
     * <p>
     * <img width="640" height="380" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/zip.png" alt="">
     * <p>This operator terminates eagerly if any of the source MaybeSources signal an onError or onComplete. This
     * also means it is possible some sources may not get subscribed to at all.
* <dl> * <dt><b>Scheduler:</b></dt> * <dd>{@code zipArray} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * * @param <T> the common element type * @param <R> the result type * @param sources * an array of source MaybeSources * @param zipper * a function that, when applied to an item emitted by each of the source MaybeSources, results in * an item that will be emitted by the resulting MaybeSource * @return a Maybe that emits the zipped results * @see <a href="http://reactivex.io/documentation/operators/zip.html">ReactiveX operators documentation: Zip</a> */ @CheckReturnValue @NonNull @SchedulerSupport(SchedulerSupport.NONE) public static <T, R> Maybe<R> zipArray(Function<? super Object[], ? extends R> zipper, MaybeSource<? extends T>... sources) { ObjectHelper.requireNonNull(sources, "sources is null"); if (sources.length == 0) { return empty(); } ObjectHelper.requireNonNull(zipper, "zipper is null"); return RxJavaPlugins.onAssembly(new MaybeZipArray<T, R>(sources, zipper)); } // ------------------------------------------------------------------ // Instance methods // ------------------------------------------------------------------ /** * Mirrors the MaybeSource (current or provided) that first signals an event. * <p> * <img width="640" height="385" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/amb.png" alt=""> * <dl> * <dt><b>Scheduler:</b></dt> * <dd>{@code ambWith} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * * @param other * a MaybeSource competing to react first. A subscription to this provided source will occur after * subscribing to the current source. 
* @return a Maybe that emits the same sequence as whichever of the source MaybeSources first
     *         signalled
     * @throws NullPointerException if {@code other} is null
     * @see <a href="http://reactivex.io/documentation/operators/amb.html">ReactiveX operators documentation: Amb</a>
     */
    @SuppressWarnings("unchecked")
    @CheckReturnValue
    @NonNull
    @SchedulerSupport(SchedulerSupport.NONE)
    public final Maybe<T> ambWith(MaybeSource<? extends T> other) {
        ObjectHelper.requireNonNull(other, "other is null");
        return ambArray(this, other);
    }

    /**
     * Calls the specified converter function during assembly time and returns its resulting value.
     * <p>
     * This allows fluent conversion to any other type.
     * <dl>
     *  <dt><b>Scheduler:</b></dt>
     *  <dd>{@code as} does not operate by default on a particular {@link Scheduler}.</dd>
     * </dl>
     * <p>History: 2.1.7 - experimental
     * @param <R> the resulting object type
     * @param converter the function that receives the current Maybe instance and returns a value
     * @return the converted value
     * @throws NullPointerException if converter is null
     * @since 2.2
     */
    @CheckReturnValue
    @SchedulerSupport(SchedulerSupport.NONE)
    public final <R> R as(@NonNull MaybeConverter<T, ? extends R> converter) {
        return ObjectHelper.requireNonNull(converter, "converter is null").apply(this);
    }

    /**
     * Waits in a blocking fashion until the current Maybe signals a success value (which is returned),
     * null if completed or an exception (which is propagated).
     * <dl>
     *  <dt><b>Scheduler:</b></dt>
     *  <dd>{@code blockingGet} does not operate by default on a particular {@link Scheduler}.</dd>
     *  <dt><b>Error handling:</b></dt>
     *  <dd>If the source signals an error, the operator wraps a checked {@link Exception}
     *  into {@link RuntimeException} and throws that. Otherwise, {@code RuntimeException}s and
     *  {@link Error}s are rethrown as they are.</dd>
     * </dl>
     * @return the success value
     */
    @CheckReturnValue
    @SchedulerSupport(SchedulerSupport.NONE)
    public final T blockingGet() {
        BlockingMultiObserver<T> observer = new BlockingMultiObserver<T>();
        subscribe(observer);
        return observer.blockingGet();
    }

    /**
     * Waits in a blocking fashion until the current Maybe signals a success value (which is returned),
     * defaultValue if completed or an exception (which is propagated).
     * <dl>
     *  <dt><b>Scheduler:</b></dt>
     *  <dd>{@code blockingGet} does not operate by default on a particular {@link Scheduler}.</dd>
     *  <dt><b>Error handling:</b></dt>
     *  <dd>If the source signals an error, the operator wraps a checked {@link Exception}
     *  into {@link RuntimeException} and throws that. Otherwise, {@code RuntimeException}s and
     *  {@link Error}s are rethrown as they are.</dd>
     * </dl>
     * @param defaultValue the default item to return if this Maybe is empty
     * @return the success value
     */
    @CheckReturnValue
    @SchedulerSupport(SchedulerSupport.NONE)
    public final T blockingGet(T defaultValue) {
        ObjectHelper.requireNonNull(defaultValue, "defaultValue is null");
        BlockingMultiObserver<T> observer = new BlockingMultiObserver<T>();
        subscribe(observer);
        return observer.blockingGet(defaultValue);
    }

    /**
     * Returns a Maybe that subscribes to this Maybe lazily, caches its event
     * and replays it, to all the downstream subscribers.
     * <p>
     * <img width="640" height="410" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/cache.png" alt="">
     * <p>
     * The operator subscribes only when the first downstream subscriber subscribes and maintains
     * a single subscription towards this Maybe.
     * <p>
     * <em>Note:</em> You sacrifice the ability to dispose the origin when you use the {@code cache}.
* <dl>
     *  <dt><b>Scheduler:</b></dt>
     *  <dd>{@code cache} does not operate by default on a particular {@link Scheduler}.</dd>
     * </dl>
     *
     * @return a Maybe that, when first subscribed to, caches all of its items and notifications for the
     *         benefit of subsequent subscribers
     * @see <a href="http://reactivex.io/documentation/operators/replay.html">ReactiveX operators documentation: Replay</a>
     */
    @CheckReturnValue
    @SchedulerSupport(SchedulerSupport.NONE)
    public final Maybe<T> cache() {
        return RxJavaPlugins.onAssembly(new MaybeCache<T>(this));
    }

    /**
     * Casts the success value of the current Maybe into the target type or signals a
     * ClassCastException if not compatible.
     * <dl>
     *  <dt><b>Scheduler:</b></dt>
     *  <dd>{@code cast} does not operate by default on a particular {@link Scheduler}.</dd>
     * </dl>
     * @param <U> the target type
     * @param clazz the type token to use for casting the success result from the current Maybe
     * @return the new Maybe instance
     * @throws NullPointerException if {@code clazz} is null
     */
    @CheckReturnValue
    @NonNull
    @SchedulerSupport(SchedulerSupport.NONE)
    public final <U> Maybe<U> cast(final Class<? extends U> clazz) {
        ObjectHelper.requireNonNull(clazz, "clazz is null");
        return map(Functions.castFunction(clazz));
    }

    /**
     * Transform a Maybe by applying a particular Transformer function to it.
     * <p>
     * This method operates on the Maybe itself whereas {@link #lift} operates on the Maybe's MaybeObservers.
     * <p>
     * If the operator you are creating is designed to act on the individual item emitted by a Maybe, use
     * {@link #lift}. If your operator is designed to transform the source Maybe as a whole (for instance, by
     * applying a particular set of existing RxJava operators to it) use {@code compose}.
     * <dl>
     *  <dt><b>Scheduler:</b></dt>
     *  <dd>{@code compose} does not operate by default on a particular {@link Scheduler}.</dd>
     * </dl>
     *
     * @param <R> the value type of the Maybe returned by the transformer function
     * @param transformer the transformer function, not null
     * @return a Maybe, transformed by the transformer function
     * @throws NullPointerException if {@code transformer} is null
     * @see <a href="https://github.com/ReactiveX/RxJava/wiki/Implementing-Your-Own-Operators">RxJava wiki: Implementing Your Own Operators</a>
     */
    @SuppressWarnings("unchecked")
    @CheckReturnValue
    @SchedulerSupport(SchedulerSupport.NONE)
    public final <R> Maybe<R> compose(MaybeTransformer<? super T, ? extends R> transformer) {
        return wrap(((MaybeTransformer<T, R>) ObjectHelper.requireNonNull(transformer, "transformer is null")).apply(this));
    }

    /**
     * Returns a Maybe that is based on applying a specified function to the item emitted by the source Maybe,
     * where that function returns a MaybeSource.
     * <p>
     * <img width="640" height="356" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Maybe.flatMap.png" alt="">
     * <dl>
     *  <dt><b>Scheduler:</b></dt>
     *  <dd>{@code concatMap} does not operate by default on a particular {@link Scheduler}.</dd>
     * </dl>
     * <p>Note that flatMap and concatMap for Maybe is the same operation.
     * @param <R> the result value type
     * @param mapper
     *            a function that, when applied to the item emitted by the source Maybe, returns a MaybeSource
     * @return the Maybe returned from {@code func} when applied to the item emitted by the source Maybe
     * @see <a href="http://reactivex.io/documentation/operators/flatmap.html">ReactiveX operators documentation: FlatMap</a>
     */
    @CheckReturnValue
    @NonNull
    @SchedulerSupport(SchedulerSupport.NONE)
    public final <R> Maybe<R> concatMap(Function<? super T, ? extends MaybeSource<?
extends R>> mapper) {
        ObjectHelper.requireNonNull(mapper, "mapper is null");
        return RxJavaPlugins.onAssembly(new MaybeFlatten<T, R>(this, mapper));
    }

    /**
     * Returns a Flowable that emits the items emitted from the current MaybeSource, then the next, one after
     * the other, without interleaving them.
     * <p>
     * <img width="640" height="380" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/concat.png" alt="">
     * <dl>
     *  <dt><b>Backpressure:</b></dt>
     *  <dd>The operator honors backpressure from downstream.</dd>
     *  <dt><b>Scheduler:</b></dt>
     *  <dd>{@code concatWith} does not operate by default on a particular {@link Scheduler}.</dd>
     * </dl>
     *
     * @param other
     *            a MaybeSource to be concatenated after the current
     * @return a Flowable that emits items emitted by the two source MaybeSources, one after the other,
     *         without interleaving them
     * @throws NullPointerException if {@code other} is null
     * @see <a href="http://reactivex.io/documentation/operators/concat.html">ReactiveX operators documentation: Concat</a>
     */
    @BackpressureSupport(BackpressureKind.FULL)
    @CheckReturnValue
    @NonNull
    @SchedulerSupport(SchedulerSupport.NONE)
    public final Flowable<T> concatWith(MaybeSource<? extends T> other) {
        ObjectHelper.requireNonNull(other, "other is null");
        return concat(this, other);
    }

    /**
     * Returns a Single that emits a Boolean that indicates whether the source Maybe emitted a
     * specified item.
     * <p>
     * <img width="640" height="320" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/contains.png" alt="">
     * <dl>
     *  <dt><b>Scheduler:</b></dt>
     *  <dd>{@code contains} does not operate by default on a particular {@link Scheduler}.</dd>
     * </dl>
     *
     * @param item
     *            the item to search for in the emissions from the source Maybe, not null
     * @return a Single that emits {@code true} if the specified item is emitted by the source Maybe,
     *         or {@code false} if the source Maybe completes without emitting that item
     * @throws NullPointerException if {@code item} is null
     * @see <a href="http://reactivex.io/documentation/operators/contains.html">ReactiveX operators documentation: Contains</a>
     */
    @CheckReturnValue
    @NonNull
    @SchedulerSupport(SchedulerSupport.NONE)
    public final Single<Boolean> contains(final Object item) {
        ObjectHelper.requireNonNull(item, "item is null");
        return RxJavaPlugins.onAssembly(new MaybeContains<T>(this, item));
    }

    /**
     * Returns a Single that counts the total number of items emitted (0 or 1) by the source Maybe and emits
     * this count as a 64-bit Long.
     * <p>
     * <img width="640" height="310" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/longCount.png" alt="">
     * <dl>
     *  <dt><b>Scheduler:</b></dt>
     *  <dd>{@code count} does not operate by default on a particular {@link Scheduler}.</dd>
     * </dl>
     *
     * @return a Single that emits a single item: the number of items emitted by the source Maybe as a
     *         64-bit Long item
     * @see <a href="http://reactivex.io/documentation/operators/count.html">ReactiveX operators documentation: Count</a>
     */
    @CheckReturnValue
    @SchedulerSupport(SchedulerSupport.NONE)
    public final Single<Long> count() {
        return RxJavaPlugins.onAssembly(new MaybeCount<T>(this));
    }

    /**
     * Returns a Maybe that emits the item emitted by the source Maybe or a specified default item
     * if the source Maybe is empty.
     * <p>
     * Note that the result Maybe is semantically equivalent to a {@code Single}, since it's guaranteed
     * to emit exactly one item or an error.
See {@link #toSingle(Object)} for a method with equivalent * behavior which returns a {@code Single}. * <p> * <img width="640" height="305" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/defaultIfEmpty.png" alt=""> * <dl> * <dt><b>Scheduler:</b></dt> * <dd>{@code defaultIfEmpty} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * * @param defaultItem * the item to emit if the source Maybe emits no items * @return a Maybe that emits either the specified default item if the source Maybe emits no * items, or the items emitted by the source Maybe * @see <a href="http://reactivex.io/documentation/operators/defaultifempty.html">ReactiveX operators documentation: DefaultIfEmpty</a> */ @CheckReturnValue @NonNull @SchedulerSupport(SchedulerSupport.NONE) public final Maybe<T> defaultIfEmpty(T defaultItem) { ObjectHelper.requireNonNull(defaultItem, "defaultItem is null"); return switchIfEmpty(just(defaultItem)); } /** * Returns a Maybe that signals the events emitted by the source Maybe shifted forward in time by a * specified delay. * <p> * <img width="640" height="310" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/delay.png" alt=""> * <dl> * <dt><b>Scheduler:</b></dt> * <dd>This version of {@code delay} operates by default on the {@code computation} {@link Scheduler}.</dd> * </dl> * * @param delay * the delay to shift the source by * @param unit * the {@link TimeUnit} in which {@code period} is defined * @return the new Maybe instance * @see <a href="http://reactivex.io/documentation/operators/delay.html">ReactiveX operators documentation: Delay</a> */ @CheckReturnValue @SchedulerSupport(SchedulerSupport.COMPUTATION) public final Maybe<T> delay(long delay, TimeUnit unit) { return delay(delay, unit, Schedulers.computation()); } /** * Returns a Maybe that signals the events emitted by the source Maybe shifted forward in time by a * specified delay running on the specified Scheduler. 
* <p> * <img width="640" height="310" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/delay.s.png" alt=""> * <dl> * <dt><b>Scheduler:</b></dt> * <dd>you specify which {@link Scheduler} this operator will use.</dd> * </dl> * * @param delay * the delay to shift the source by * @param unit * the time unit of {@code delay} * @param scheduler * the {@link Scheduler} to use for delaying * @return the new Maybe instance * @see <a href="http://reactivex.io/documentation/operators/delay.html">ReactiveX operators documentation: Delay</a> */ @CheckReturnValue @NonNull @SchedulerSupport(SchedulerSupport.CUSTOM) public final Maybe<T> delay(long delay, TimeUnit unit, Scheduler scheduler) { ObjectHelper.requireNonNull(unit, "unit is null"); ObjectHelper.requireNonNull(scheduler, "scheduler is null"); return RxJavaPlugins.onAssembly(new MaybeDelay<T>(this, Math.max(0L, delay), unit, scheduler)); } /** * Delays the emission of this Maybe until the given Publisher signals an item or completes. 
* <p> * <img width="640" height="450" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/delay.oo.png" alt=""> * <dl> * <dt><b>Backpressure:</b></dt> * <dd>The {@code delayIndicator} is consumed in an unbounded manner but is cancelled after * the first item it produces.</dd> * <dt><b>Scheduler:</b></dt> * <dd>This version of {@code delay} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * * @param <U> * the subscription delay value type (ignored) * @param <V> * the item delay value type (ignored) * @param delayIndicator * the Publisher that gets subscribed to when this Maybe signals an event and that * signal is emitted when the Publisher signals an item or completes * @return the new Maybe instance * @see <a href="http://reactivex.io/documentation/operators/delay.html">ReactiveX operators documentation: Delay</a> */ @CheckReturnValue @NonNull @SchedulerSupport(SchedulerSupport.NONE) @BackpressureSupport(BackpressureKind.UNBOUNDED_IN) public final <U, V> Maybe<T> delay(Publisher<U> delayIndicator) { ObjectHelper.requireNonNull(delayIndicator, "delayIndicator is null"); return RxJavaPlugins.onAssembly(new MaybeDelayOtherPublisher<T, U>(this, delayIndicator)); } /** * Returns a Maybe that delays the subscription to this Maybe * until the other Publisher emits an element or completes normally. * <dl> * <dt><b>Backpressure:</b></dt> * <dd>The {@code Publisher} source is consumed in an unbounded fashion (without applying backpressure).</dd> * <dt><b>Scheduler:</b></dt> * <dd>This method does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * * @param <U> the value type of the other Publisher, irrelevant * @param subscriptionIndicator the other Publisher that should trigger the subscription * to this Publisher. * @return a Maybe that delays the subscription to this Maybe * until the other Publisher emits an element or completes normally. 
*/ @BackpressureSupport(BackpressureKind.UNBOUNDED_IN) @CheckReturnValue @NonNull @SchedulerSupport(SchedulerSupport.NONE) public final <U> Maybe<T> delaySubscription(Publisher<U> subscriptionIndicator) { ObjectHelper.requireNonNull(subscriptionIndicator, "subscriptionIndicator is null"); return RxJavaPlugins.onAssembly(new MaybeDelaySubscriptionOtherPublisher<T, U>(this, subscriptionIndicator)); } /** * Returns a Maybe that delays the subscription to the source Maybe by a given amount of time. * <p> * <img width="640" height="310" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/delaySubscription.png" alt=""> * <dl> * <dt><b>Scheduler:</b></dt> * <dd>This version of {@code delaySubscription} operates by default on the {@code computation} {@link Scheduler}.</dd> * </dl> * * @param delay * the time to delay the subscription * @param unit * the time unit of {@code delay} * @return a Maybe that delays the subscription to the source Maybe by the given amount * @see <a href="http://reactivex.io/documentation/operators/delay.html">ReactiveX operators documentation: Delay</a> */ @CheckReturnValue @SchedulerSupport(SchedulerSupport.COMPUTATION) public final Maybe<T> delaySubscription(long delay, TimeUnit unit) { return delaySubscription(delay, unit, Schedulers.computation()); } /** * Returns a Maybe that delays the subscription to the source Maybe by a given amount of time, * both waiting and subscribing on a given Scheduler. 
* <p> * <img width="640" height="310" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/delaySubscription.s.png" alt=""> * <dl> * <dt><b>Scheduler:</b></dt> * <dd>You specify which {@link Scheduler} this operator will use.</dd> * </dl> * * @param delay * the time to delay the subscription * @param unit * the time unit of {@code delay} * @param scheduler * the Scheduler on which the waiting and subscription will happen * @return a Maybe that delays the subscription to the source Maybe by a given * amount, waiting and subscribing on the given Scheduler * @see <a href="http://reactivex.io/documentation/operators/delay.html">ReactiveX operators documentation: Delay</a> */ @CheckReturnValue @SchedulerSupport(SchedulerSupport.CUSTOM) public final Maybe<T> delaySubscription(long delay, TimeUnit unit, Scheduler scheduler) { return delaySubscription(Flowable.timer(delay, unit, scheduler)); } /** * Calls the specified consumer with the success item after this item has been emitted to the downstream. * <p>Note that the {@code onAfterNext} action is shared between subscriptions and as such * should be thread-safe. * <dl> * <dt><b>Scheduler:</b></dt> * <dd>{@code doAfterSuccess} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * <p>History: 2.0.1 - experimental * @param onAfterSuccess the Consumer that will be called after emitting an item from upstream to the downstream * @return the new Maybe instance * @since 2.1 */ @CheckReturnValue @NonNull @SchedulerSupport(SchedulerSupport.NONE) public final Maybe<T> doAfterSuccess(Consumer<? super T> onAfterSuccess) { ObjectHelper.requireNonNull(onAfterSuccess, "onAfterSuccess is null"); return RxJavaPlugins.onAssembly(new MaybeDoAfterSuccess<T>(this, onAfterSuccess)); } /** * Registers an {@link Action} to be called when this Maybe invokes either * {@link MaybeObserver#onComplete onSuccess}, * {@link MaybeObserver#onComplete onComplete} or {@link MaybeObserver#onError onError}. 
* <p> * <img width="640" height="310" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/finallyDo.png" alt=""> * <dl> * <dt><b>Scheduler:</b></dt> * <dd>{@code doAfterTerminate} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * * @param onAfterTerminate * an {@link Action} to be invoked when the source Maybe finishes * @return a Maybe that emits the same items as the source Maybe, then invokes the * {@link Action} * @see <a href="http://reactivex.io/documentation/operators/do.html">ReactiveX operators documentation: Do</a> */ @CheckReturnValue @NonNull @SchedulerSupport(SchedulerSupport.NONE) public final Maybe<T> doAfterTerminate(Action onAfterTerminate) { return RxJavaPlugins.onAssembly(new MaybePeek<T>(this, Functions.emptyConsumer(), // onSubscribe Functions.emptyConsumer(), // onSuccess Functions.emptyConsumer(), // onError Functions.EMPTY_ACTION, // onComplete ObjectHelper.requireNonNull(onAfterTerminate, "onAfterTerminate is null"), Functions.EMPTY_ACTION // dispose )); } /** * Calls the specified action after this Maybe signals onSuccess, onError or onComplete or gets disposed by * the downstream. * <p>In case of a race between a terminal event and a dispose call, the provided {@code onFinally} action * is executed once per subscription. * <p>Note that the {@code onFinally} action is shared between subscriptions and as such * should be thread-safe. 
* <dl> * <dt><b>Scheduler:</b></dt> * <dd>{@code doFinally} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * <p>History: 2.0.1 - experimental * @param onFinally the action called when this Maybe terminates or gets disposed * @return the new Maybe instance * @since 2.1 */ @CheckReturnValue @NonNull @SchedulerSupport(SchedulerSupport.NONE) public final Maybe<T> doFinally(Action onFinally) { ObjectHelper.requireNonNull(onFinally, "onFinally is null"); return RxJavaPlugins.onAssembly(new MaybeDoFinally<T>(this, onFinally)); } /** * Calls the shared {@code Action} if a MaybeObserver subscribed to the current Maybe * disposes the common Disposable it received via onSubscribe. * <dl> * <dt><b>Scheduler:</b></dt> * <dd>{@code doOnDispose} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * @param onDispose the action called when the subscription is disposed * @throws NullPointerException if onDispose is null * @return the new Maybe instance */ @CheckReturnValue @NonNull @SchedulerSupport(SchedulerSupport.NONE) public final Maybe<T> doOnDispose(Action onDispose) { return RxJavaPlugins.onAssembly(new MaybePeek<T>(this, Functions.emptyConsumer(), // onSubscribe Functions.emptyConsumer(), // onSuccess Functions.emptyConsumer(), // onError Functions.EMPTY_ACTION, // onComplete Functions.EMPTY_ACTION, // (onSuccess | onError | onComplete) after ObjectHelper.requireNonNull(onDispose, "onDispose is null") )); } /** * Modifies the source Maybe so that it invokes an action when it calls {@code onComplete}. 
* <p> * <img width="640" height="358" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/doOnComplete.m.png" alt=""> * <dl> * <dt><b>Scheduler:</b></dt> * <dd>{@code doOnComplete} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * * @param onComplete * the action to invoke when the source Maybe calls {@code onComplete} * @return the new Maybe with the side-effecting behavior applied * @see <a href="http://reactivex.io/documentation/operators/do.html">ReactiveX operators documentation: Do</a> */ @CheckReturnValue @NonNull @SchedulerSupport(SchedulerSupport.NONE) public final Maybe<T> doOnComplete(Action onComplete) { return RxJavaPlugins.onAssembly(new MaybePeek<T>(this, Functions.emptyConsumer(), // onSubscribe Functions.emptyConsumer(), // onSuccess Functions.emptyConsumer(), // onError ObjectHelper.requireNonNull(onComplete, "onComplete is null"), Functions.EMPTY_ACTION, // (onSuccess | onError | onComplete) Functions.EMPTY_ACTION // dispose )); } /** * Calls the shared consumer with the error sent via onError for each * MaybeObserver that subscribes to the current Maybe. * <p> * <img width="640" height="358" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/doOnError.m.png" alt=""> * <dl> * <dt><b>Scheduler:</b></dt> * <dd>{@code doOnError} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * @param onError the consumer called with the success value of onError * @return the new Maybe instance */ @CheckReturnValue @NonNull @SchedulerSupport(SchedulerSupport.NONE) public final Maybe<T> doOnError(Consumer<? 
super Throwable> onError) { return RxJavaPlugins.onAssembly(new MaybePeek<T>(this, Functions.emptyConsumer(), // onSubscribe Functions.emptyConsumer(), // onSuccess ObjectHelper.requireNonNull(onError, "onError is null"), Functions.EMPTY_ACTION, // onComplete Functions.EMPTY_ACTION, // (onSuccess | onError | onComplete) Functions.EMPTY_ACTION // dispose )); } /** * Calls the given onEvent callback with the (success value, null) for an onSuccess, (null, throwable) for * an onError or (null, null) for an onComplete signal from this Maybe before delivering said * signal to the downstream. * <p> * Exceptions thrown from the callback will override the event so the downstream receives the * error instead of the original signal. * <dl> * <dt><b>Scheduler:</b></dt> * <dd>{@code doOnEvent} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * @param onEvent the callback to call with the terminal event tuple * @return the new Maybe instance */ @CheckReturnValue @SchedulerSupport(SchedulerSupport.NONE) public final Maybe<T> doOnEvent(BiConsumer<? super T, ? super Throwable> onEvent) { ObjectHelper.requireNonNull(onEvent, "onEvent is null"); return RxJavaPlugins.onAssembly(new MaybeDoOnEvent<T>(this, onEvent)); } /** * Calls the shared consumer with the Disposable sent through the onSubscribe for each * MaybeObserver that subscribes to the current Maybe. * <dl> * <dt><b>Scheduler:</b></dt> * <dd>{@code doOnSubscribe} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * @param onSubscribe the consumer called with the Disposable sent via onSubscribe * @return the new Maybe instance */ @CheckReturnValue @NonNull @SchedulerSupport(SchedulerSupport.NONE) public final Maybe<T> doOnSubscribe(Consumer<? 
super Disposable> onSubscribe) { return RxJavaPlugins.onAssembly(new MaybePeek<T>(this, ObjectHelper.requireNonNull(onSubscribe, "onSubscribe is null"), Functions.emptyConsumer(), // onSuccess Functions.emptyConsumer(), // onError Functions.EMPTY_ACTION, // onComplete Functions.EMPTY_ACTION, // (onSuccess | onError | onComplete) Functions.EMPTY_ACTION // dispose )); } /** * Returns a Maybe instance that calls the given onTerminate callback * just before this Maybe completes normally or with an exception. * <p> * <img width="640" height="305" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/doOnTerminate.png" alt=""> * <p> * This differs from {@code doAfterTerminate} in that this happens <em>before</em> the {@code onComplete} or * {@code onError} notification. * <dl> * <dt><b>Scheduler:</b></dt> * <dd>{@code doOnTerminate} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * @param onTerminate the action to invoke when the consumer calls {@code onComplete} or {@code onError} * @return the new Maybe instance * @see <a href="http://reactivex.io/documentation/operators/do.html">ReactiveX operators documentation: Do</a> * @see #doOnTerminate(Action) * @since 2.2.7 - experimental */ @Experimental @CheckReturnValue @NonNull @SchedulerSupport(SchedulerSupport.NONE) public final Maybe<T> doOnTerminate(final Action onTerminate) { ObjectHelper.requireNonNull(onTerminate, "onTerminate is null"); return RxJavaPlugins.onAssembly(new MaybeDoOnTerminate<T>(this, onTerminate)); } /** * Calls the shared consumer with the success value sent via onSuccess for each * MaybeObserver that subscribes to the current Maybe. 
* <p> * <img width="640" height="358" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/doOnSuccess.m.png" alt=""> * <dl> * <dt><b>Scheduler:</b></dt> * <dd>{@code doOnSuccess} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * @param onSuccess the consumer called with the success value of onSuccess * @return the new Maybe instance */ @CheckReturnValue @NonNull @SchedulerSupport(SchedulerSupport.NONE) public final Maybe<T> doOnSuccess(Consumer<? super T> onSuccess) { return RxJavaPlugins.onAssembly(new MaybePeek<T>(this, Functions.emptyConsumer(), // onSubscribe ObjectHelper.requireNonNull(onSuccess, "onSuccess is null"), Functions.emptyConsumer(), // onError Functions.EMPTY_ACTION, // onComplete Functions.EMPTY_ACTION, // (onSuccess | onError | onComplete) Functions.EMPTY_ACTION // dispose )); } /** * Filters the success item of the Maybe via a predicate function and emitting it if the predicate * returns true, completing otherwise. * <p> * <img width="640" height="310" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/filter.png" alt=""> * <dl> * <dt><b>Scheduler:</b></dt> * <dd>{@code filter} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * * @param predicate * a function that evaluates the item emitted by the source Maybe, returning {@code true} * if it passes the filter * @return a Maybe that emit the item emitted by the source Maybe that the filter * evaluates as {@code true} * @see <a href="http://reactivex.io/documentation/operators/filter.html">ReactiveX operators documentation: Filter</a> */ @CheckReturnValue @NonNull @SchedulerSupport(SchedulerSupport.NONE) public final Maybe<T> filter(Predicate<? 
super T> predicate) { ObjectHelper.requireNonNull(predicate, "predicate is null"); return RxJavaPlugins.onAssembly(new MaybeFilter<T>(this, predicate)); } /** * Returns a Maybe that is based on applying a specified function to the item emitted by the source Maybe, * where that function returns a MaybeSource. * <p> * <img width="640" height="356" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Maybe.flatMap.png" alt=""> * <dl> * <dt><b>Scheduler:</b></dt> * <dd>{@code flatMap} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * <p>Note that flatMap and concatMap for Maybe is the same operation. * * @param <R> the result value type * @param mapper * a function that, when applied to the item emitted by the source Maybe, returns a MaybeSource * @return the Maybe returned from {@code func} when applied to the item emitted by the source Maybe * @see <a href="http://reactivex.io/documentation/operators/flatmap.html">ReactiveX operators documentation: FlatMap</a> */ @CheckReturnValue @NonNull @SchedulerSupport(SchedulerSupport.NONE) public final <R> Maybe<R> flatMap(Function<? super T, ? extends MaybeSource<? extends R>> mapper) { ObjectHelper.requireNonNull(mapper, "mapper is null"); return RxJavaPlugins.onAssembly(new MaybeFlatten<T, R>(this, mapper)); } /** * Maps the onSuccess, onError or onComplete signals of this Maybe into MaybeSource and emits that * MaybeSource's signals. 
* <p> * <img width="640" height="354" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Maybe.flatMap.mmm.png" alt=""> * <dl> * <dt><b>Scheduler:</b></dt> * <dd>{@code flatMap} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * * @param <R> * the result type * @param onSuccessMapper * a function that returns a MaybeSource to merge for the onSuccess item emitted by this Maybe * @param onErrorMapper * a function that returns a MaybeSource to merge for an onError notification from this Maybe * @param onCompleteSupplier * a function that returns a MaybeSource to merge for an onComplete notification this Maybe * @return the new Maybe instance * @see <a href="http://reactivex.io/documentation/operators/flatmap.html">ReactiveX operators documentation: FlatMap</a> */ @CheckReturnValue @NonNull @SchedulerSupport(SchedulerSupport.NONE) public final <R> Maybe<R> flatMap( Function<? super T, ? extends MaybeSource<? extends R>> onSuccessMapper, Function<? super Throwable, ? extends MaybeSource<? extends R>> onErrorMapper, Callable<? extends MaybeSource<? extends R>> onCompleteSupplier) { ObjectHelper.requireNonNull(onSuccessMapper, "onSuccessMapper is null"); ObjectHelper.requireNonNull(onErrorMapper, "onErrorMapper is null"); ObjectHelper.requireNonNull(onCompleteSupplier, "onCompleteSupplier is null"); return RxJavaPlugins.onAssembly(new MaybeFlatMapNotification<T, R>(this, onSuccessMapper, onErrorMapper, onCompleteSupplier)); } /** * Returns a Maybe that emits the results of a specified function to the pair of values emitted by the * source Maybe and a specified mapped MaybeSource. 
* <p> * <img width="640" height="390" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/mergeMap.r.png" alt=""> * <dl> * <dt><b>Scheduler:</b></dt> * <dd>{@code flatMap} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * * @param <U> * the type of items emitted by the MaybeSource returned by the {@code mapper} function * @param <R> * the type of items emitted by the resulting Maybe * @param mapper * a function that returns a MaybeSource for the item emitted by the source Maybe * @param resultSelector * a function that combines one item emitted by each of the source and collection MaybeSource and * returns an item to be emitted by the resulting MaybeSource * @return the new Maybe instance * @see <a href="http://reactivex.io/documentation/operators/flatmap.html">ReactiveX operators documentation: FlatMap</a> */ @CheckReturnValue @NonNull @SchedulerSupport(SchedulerSupport.NONE) public final <U, R> Maybe<R> flatMap(Function<? super T, ? extends MaybeSource<? extends U>> mapper, BiFunction<? super T, ? super U, ? extends R> resultSelector) { ObjectHelper.requireNonNull(mapper, "mapper is null"); ObjectHelper.requireNonNull(resultSelector, "resultSelector is null"); return RxJavaPlugins.onAssembly(new MaybeFlatMapBiSelector<T, U, R>(this, mapper, resultSelector)); } /** * Maps the success value of the upstream {@link Maybe} into an {@link Iterable} and emits its items as a * {@link Flowable} sequence. 
* <p> * <img width="640" height="373" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/flattenAsFlowable.png" alt=""> * <dl> * <dt><b>Backpressure:</b></dt> * <dd>The operator honors backpressure from downstream.</dd> * <dt><b>Scheduler:</b></dt> * <dd>{@code flattenAsFlowable} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * * @param <U> * the type of item emitted by the resulting Iterable * @param mapper * a function that returns an Iterable sequence of values for when given an item emitted by the * source Maybe * @return the new Flowable instance * @see <a href="http://reactivex.io/documentation/operators/flatmap.html">ReactiveX operators documentation: FlatMap</a> */ @BackpressureSupport(BackpressureKind.FULL) @CheckReturnValue @NonNull @SchedulerSupport(SchedulerSupport.NONE) public final <U> Flowable<U> flattenAsFlowable(final Function<? super T, ? extends Iterable<? extends U>> mapper) { ObjectHelper.requireNonNull(mapper, "mapper is null"); return RxJavaPlugins.onAssembly(new MaybeFlatMapIterableFlowable<T, U>(this, mapper)); } /** * Maps the success value of the upstream {@link Maybe} into an {@link Iterable} and emits its items as an * {@link Observable} sequence. 
* <p> * <img width="640" height="373" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/flattenAsObservable.png" alt=""> * <dl> * <dt><b>Scheduler:</b></dt> * <dd>{@code flattenAsObservable} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * * @param <U> * the type of item emitted by the resulting Iterable * @param mapper * a function that returns an Iterable sequence of values for when given an item emitted by the * source Maybe * @return the new Observable instance * @see <a href="http://reactivex.io/documentation/operators/flatmap.html">ReactiveX operators documentation: FlatMap</a> */ @CheckReturnValue @NonNull @SchedulerSupport(SchedulerSupport.NONE) public final <U> Observable<U> flattenAsObservable(final Function<? super T, ? extends Iterable<? extends U>> mapper) { ObjectHelper.requireNonNull(mapper, "mapper is null"); return RxJavaPlugins.onAssembly(new MaybeFlatMapIterableObservable<T, U>(this, mapper)); } /** * Returns an Observable that is based on applying a specified function to the item emitted by the source Maybe, * where that function returns an ObservableSource. * <p> * <img width="640" height="356" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Maybe.flatMap.png" alt=""> * <dl> * <dt><b>Scheduler:</b></dt> * <dd>{@code flatMapObservable} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * * @param <R> the result value type * @param mapper * a function that, when applied to the item emitted by the source Maybe, returns an ObservableSource * @return the Observable returned from {@code func} when applied to the item emitted by the source Maybe * @see <a href="http://reactivex.io/documentation/operators/flatmap.html">ReactiveX operators documentation: FlatMap</a> */ @CheckReturnValue @NonNull @SchedulerSupport(SchedulerSupport.NONE) public final <R> Observable<R> flatMapObservable(Function<? super T, ? extends ObservableSource<? 
extends R>> mapper) { ObjectHelper.requireNonNull(mapper, "mapper is null"); return RxJavaPlugins.onAssembly(new MaybeFlatMapObservable<T, R>(this, mapper)); } /** * Returns a Flowable that emits items based on applying a specified function to the item emitted by the * source Maybe, where that function returns a Publisher. * <p> * <img width="640" height="260" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Maybe.flatMapPublisher.png" alt=""> * <dl> * <dt><b>Backpressure:</b></dt> * <dd>The returned Flowable honors the downstream backpressure.</dd> * <dt><b>Scheduler:</b></dt> * <dd>{@code flatMapPublisher} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * * @param <R> the result value type * @param mapper * a function that, when applied to the item emitted by the source Maybe, returns a * Flowable * @return the Flowable returned from {@code func} when applied to the item emitted by the source Maybe * @see <a href="http://reactivex.io/documentation/operators/flatmap.html">ReactiveX operators documentation: FlatMap</a> */ @BackpressureSupport(BackpressureKind.FULL) @CheckReturnValue @NonNull @SchedulerSupport(SchedulerSupport.NONE) public final <R> Flowable<R> flatMapPublisher(Function<? super T, ? extends Publisher<? extends R>> mapper) { ObjectHelper.requireNonNull(mapper, "mapper is null"); return RxJavaPlugins.onAssembly(new MaybeFlatMapPublisher<T, R>(this, mapper)); } /** * Returns a {@link Single} based on applying a specified function to the item emitted by the * source {@link Maybe}, where that function returns a {@link Single}. * When this Maybe completes a {@link NoSuchElementException} will be thrown. 
* <p> * <img width="640" height="356" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Maybe.flatMapSingle.png" alt=""> * <dl> * <dt><b>Scheduler:</b></dt> * <dd>{@code flatMapSingle} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * * @param <R> the result value type * @param mapper * a function that, when applied to the item emitted by the source Maybe, returns a * Single * @return the Single returned from {@code mapper} when applied to the item emitted by the source Maybe * @see <a href="http://reactivex.io/documentation/operators/flatmap.html">ReactiveX operators documentation: FlatMap</a> */ @CheckReturnValue @NonNull @SchedulerSupport(SchedulerSupport.NONE) public final <R> Single<R> flatMapSingle(final Function<? super T, ? extends SingleSource<? extends R>> mapper) { ObjectHelper.requireNonNull(mapper, "mapper is null"); return RxJavaPlugins.onAssembly(new MaybeFlatMapSingle<T, R>(this, mapper)); } /** * Returns a {@link Maybe} based on applying a specified function to the item emitted by the * source {@link Maybe}, where that function returns a {@link Single}. * When this Maybe just completes the resulting {@code Maybe} completes as well. * <p> * <img width="640" height="356" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Maybe.flatMapSingle.png" alt=""> * <dl> * <dt><b>Scheduler:</b></dt> * <dd>{@code flatMapSingleElement} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * * <p>History: 2.0.2 - experimental * @param <R> the result value type * @param mapper * a function that, when applied to the item emitted by the source Maybe, returns a * Single * @return the new Maybe instance * @see <a href="http://reactivex.io/documentation/operators/flatmap.html">ReactiveX operators documentation: FlatMap</a> * @since 2.1 */ @CheckReturnValue @NonNull @SchedulerSupport(SchedulerSupport.NONE) public final <R> Maybe<R> flatMapSingleElement(final Function<? super T, ? 
extends SingleSource<? extends R>> mapper) { ObjectHelper.requireNonNull(mapper, "mapper is null"); return RxJavaPlugins.onAssembly(new MaybeFlatMapSingleElement<T, R>(this, mapper)); } /** * Returns a {@link Completable} that completes based on applying a specified function to the item emitted by the * source {@link Maybe}, where that function returns a {@link Completable}. * <p> * <img width="640" height="267" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Maybe.flatMapCompletable.png" alt=""> * <dl> * <dt><b>Scheduler:</b></dt> * <dd>{@code flatMapCompletable} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * * @param mapper * a function that, when applied to the item emitted by the source Maybe, returns a * Completable * @return the Completable returned from {@code mapper} when applied to the item emitted by the source Maybe * @see <a href="http://reactivex.io/documentation/operators/flatmap.html">ReactiveX operators documentation: FlatMap</a> */ @CheckReturnValue @NonNull @SchedulerSupport(SchedulerSupport.NONE) public final Completable flatMapCompletable(final Function<? super T, ? extends CompletableSource> mapper) { ObjectHelper.requireNonNull(mapper, "mapper is null"); return RxJavaPlugins.onAssembly(new MaybeFlatMapCompletable<T>(this, mapper)); } /** * Hides the identity of this Maybe and its Disposable. * <p> * <img width="640" height="300" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Maybe.hide.png" alt=""> * <p>Allows preventing certain identity-based * optimizations (fusion). 
* <dl> * <dt><b>Scheduler:</b></dt> * <dd>{@code hide} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * @return the new Maybe instance */ @CheckReturnValue @SchedulerSupport(SchedulerSupport.NONE) public final Maybe<T> hide() { return RxJavaPlugins.onAssembly(new MaybeHide<T>(this)); } /** * Ignores the item emitted by the source Maybe and only calls {@code onComplete} or {@code onError}. * <p> * <img width="640" height="389" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Maybe.ignoreElement.png" alt=""> * <dl> * <dt><b>Scheduler:</b></dt> * <dd>{@code ignoreElement} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * * @return an empty Completable that only calls {@code onComplete} or {@code onError}, based on which one is * called by the source Maybe * @see <a href="http://reactivex.io/documentation/operators/ignoreelements.html">ReactiveX operators documentation: IgnoreElements</a> */ @CheckReturnValue @SchedulerSupport(SchedulerSupport.NONE) public final Completable ignoreElement() { return RxJavaPlugins.onAssembly(new MaybeIgnoreElementCompletable<T>(this)); } /** * Returns a Single that emits {@code true} if the source Maybe is empty, otherwise {@code false}. 
* <p> * <img width="640" height="320" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/isEmpty.png" alt=""> * <dl> * <dt><b>Scheduler:</b></dt> * <dd>{@code isEmpty} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * * @return a Single that emits a Boolean * @see <a href="http://reactivex.io/documentation/operators/contains.html">ReactiveX operators documentation: Contains</a> */ @CheckReturnValue @SchedulerSupport(SchedulerSupport.NONE) public final Single<Boolean> isEmpty() { return RxJavaPlugins.onAssembly(new MaybeIsEmptySingle<T>(this)); } /** * <strong>This method requires advanced knowledge about building operators, please consider * other standard composition methods first;</strong> * Returns a {@code Maybe} which, when subscribed to, invokes the {@link MaybeOperator#apply(MaybeObserver) apply(MaybeObserver)} method * of the provided {@link MaybeOperator} for each individual downstream {@link Maybe} and allows the * insertion of a custom operator by accessing the downstream's {@link MaybeObserver} during this subscription phase * and providing a new {@code MaybeObserver}, containing the custom operator's intended business logic, that will be * used in the subscription process going further upstream. * <p> * Generally, such a new {@code MaybeObserver} will wrap the downstream's {@code MaybeObserver} and forwards the * {@code onSuccess}, {@code onError} and {@code onComplete} events from the upstream directly or according to the * emission pattern the custom operator's business logic requires. In addition, such operator can intercept the * flow control calls of {@code dispose} and {@code isDisposed} that would have traveled upstream and perform * additional actions depending on the same business logic requirements. 
* <p> * Example: * <pre><code> * // Step 1: Create the consumer type that will be returned by the MaybeOperator.apply(): * * public final class CustomMaybeObserver&lt;T&gt; implements MaybeObserver&lt;T&gt;, Disposable { * * // The downstream's MaybeObserver that will receive the onXXX events * final MaybeObserver&lt;? super String&gt; downstream; * * // The connection to the upstream source that will call this class' onXXX methods * Disposable upstream; * * // The constructor takes the downstream subscriber and usually any other parameters * public CustomMaybeObserver(MaybeObserver&lt;? super String&gt; downstream) { * this.downstream = downstream; * } * * // In the subscription phase, the upstream sends a Disposable to this class * // and subsequently this class has to send a Disposable to the downstream. * // Note that relaying the upstream's Disposable directly is not allowed in RxJava * &#64;Override * public void onSubscribe(Disposable d) { * if (upstream != null) { * d.dispose(); * } else { * upstream = d; * downstream.onSubscribe(this); * } * } * * // The upstream calls this with the next item and the implementation's * // responsibility is to emit an item to the downstream based on the intended * // business logic, or if it can't do so for the particular item, * // request more from the upstream * &#64;Override * public void onSuccess(T item) { * String str = item.toString(); * if (str.length() &lt; 2) { * downstream.onSuccess(str); * } else { * // Maybe is usually expected to produce one of the onXXX events * downstream.onComplete(); * } * } * * // Some operators may handle the upstream's error while others * // could just forward it to the downstream. * &#64;Override * public void onError(Throwable throwable) { * downstream.onError(throwable); * } * * // When the upstream completes, usually the downstream should complete as well. 
* &#64;Override * public void onComplete() { * downstream.onComplete(); * } * * // Some operators may use their own resources which should be cleaned up if * // the downstream disposes the flow before it completed. Operators without * // resources can simply forward the dispose to the upstream. * // In some cases, a disposed flag may be set by this method so that other parts * // of this class may detect the dispose and stop sending events * // to the downstream. * &#64;Override * public void dispose() { * upstream.dispose(); * } * * // Some operators may simply forward the call to the upstream while others * // can return the disposed flag set in dispose(). * &#64;Override * public boolean isDisposed() { * return upstream.isDisposed(); * } * } * * // Step 2: Create a class that implements the MaybeOperator interface and * // returns the custom consumer type from above in its apply() method. * // Such class may define additional parameters to be submitted to * // the custom consumer type. * * final class CustomMaybeOperator&lt;T&gt; implements MaybeOperator&lt;String&gt; { * &#64;Override * public MaybeObserver&lt;? super String&gt; apply(MaybeObserver&lt;? super T&gt; upstream) { * return new CustomMaybeObserver&lt;T&gt;(upstream); * } * } * * // Step 3: Apply the custom operator via lift() in a flow by creating an instance of it * // or reusing an existing one. * * Maybe.just(5) * .lift(new CustomMaybeOperator&lt;Integer&gt;()) * .test() * .assertResult("5"); * * Maybe.just(15) * .lift(new CustomMaybeOperator&lt;Integer&gt;()) * .test() * .assertResult(); * </code></pre> * <p> * Creating custom operators can be complicated and it is recommended one consults the * <a href="https://github.com/ReactiveX/RxJava/wiki/Writing-operators-for-2.0">RxJava wiki: Writing operators</a> page about * the tools, requirements, rules, considerations and pitfalls of implementing them. 
* <p> * Note that implementing custom operators via this {@code lift()} method adds slightly more overhead by requiring * an additional allocation and indirection per assembled flows. Instead, extending the abstract {@code Maybe} * class and creating a {@link MaybeTransformer} with it is recommended. * <p> * Note also that it is not possible to stop the subscription phase in {@code lift()} as the {@code apply()} method * requires a non-null {@code MaybeObserver} instance to be returned, which is then unconditionally subscribed to * the upstream {@code Maybe}. For example, if the operator decided there is no reason to subscribe to the * upstream source because of some optimization possibility or a failure to prepare the operator, it still has to * return a {@code MaybeObserver} that should immediately dispose the upstream's {@code Disposable} in its * {@code onSubscribe} method. Again, using a {@code MaybeTransformer} and extending the {@code Maybe} is * a better option as {@link #subscribeActual} can decide to not subscribe to its upstream after all. * <dl> * <dt><b>Scheduler:</b></dt> * <dd>{@code lift} does not operate by default on a particular {@link Scheduler}, however, the * {@link MaybeOperator} may use a {@code Scheduler} to support its own asynchronous behavior.</dd> * </dl> * * @param <R> the output value type * @param lift the {@link MaybeOperator} that receives the downstream's {@code MaybeObserver} and should return * a {@code MaybeObserver} with custom behavior to be used as the consumer for the current * {@code Maybe}. * @return the new Maybe instance * @see <a href="https://github.com/ReactiveX/RxJava/wiki/Writing-operators-for-2.0">RxJava wiki: Writing operators</a> * @see #compose(MaybeTransformer) */ @CheckReturnValue @NonNull @SchedulerSupport(SchedulerSupport.NONE) public final <R> Maybe<R> lift(final MaybeOperator<? extends R, ? 
super T> lift) { ObjectHelper.requireNonNull(lift, "lift is null"); return RxJavaPlugins.onAssembly(new MaybeLift<T, R>(this, lift)); } /** * Returns a Maybe that applies a specified function to the item emitted by the source Maybe and * emits the result of this function application. * <p> * <img width="640" height="515" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Maybe.map.png" alt=""> * <dl> * <dt><b>Scheduler:</b></dt> * <dd>{@code map} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * * @param <R> the result value type * @param mapper * a function to apply to the item emitted by the Maybe * @return a Maybe that emits the item from the source Maybe, transformed by the specified function * @see <a href="http://reactivex.io/documentation/operators/map.html">ReactiveX operators documentation: Map</a> */ @CheckReturnValue @NonNull @SchedulerSupport(SchedulerSupport.NONE) public final <R> Maybe<R> map(Function<? super T, ? extends R> mapper) { ObjectHelper.requireNonNull(mapper, "mapper is null"); return RxJavaPlugins.onAssembly(new MaybeMap<T, R>(this, mapper)); } /** * Maps the signal types of this Maybe into a {@link Notification} of the same kind * and emits it as a single success value to downstream. * <p> * <img width="640" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/materialize.png" alt=""> * <dl> * <dt><b>Scheduler:</b></dt> * <dd>{@code materialize} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * @return the new Single instance * @since 2.2.4 - experimental * @see Single#dematerialize(Function) */ @Experimental @CheckReturnValue @SchedulerSupport(SchedulerSupport.NONE) public final Single<Notification<T>> materialize() { return RxJavaPlugins.onAssembly(new MaybeMaterialize<T>(this)); } /** * Flattens this and another Maybe into a single Flowable, without any transformation. 
* <p> * <img width="640" height="380" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/merge.png" alt=""> * <p> * You can combine items emitted by multiple Maybes so that they appear as a single Flowable, by * using the {@code mergeWith} method. * <dl> * <dt><b>Backpressure:</b></dt> * <dd>The operator honors backpressure from downstream.</dd> * <dt><b>Scheduler:</b></dt> * <dd>{@code mergeWith} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * * @param other * a MaybeSource to be merged * @return a new Flowable instance * @see <a href="http://reactivex.io/documentation/operators/merge.html">ReactiveX operators documentation: Merge</a> */ @BackpressureSupport(BackpressureKind.FULL) @CheckReturnValue @NonNull @SchedulerSupport(SchedulerSupport.NONE) public final Flowable<T> mergeWith(MaybeSource<? extends T> other) { ObjectHelper.requireNonNull(other, "other is null"); return merge(this, other); } /** * Wraps a Maybe to emit its item (or notify of its error) on a specified {@link Scheduler}, * asynchronously. 
* <p> * <img width="640" height="182" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Maybe.observeOn.png" alt=""> * <dl> * <dt><b>Scheduler:</b></dt> * <dd>you specify which {@link Scheduler} this operator will use.</dd> * </dl> * * @param scheduler * the {@link Scheduler} to notify subscribers on * @return the new Maybe instance that its subscribers are notified on the specified * {@link Scheduler} * @see <a href="http://reactivex.io/documentation/operators/observeon.html">ReactiveX operators documentation: ObserveOn</a> * @see <a href="http://www.grahamlea.com/2014/07/rxjava-threading-examples/">RxJava Threading Examples</a> * @see #subscribeOn */ @CheckReturnValue @NonNull @SchedulerSupport(SchedulerSupport.CUSTOM) public final Maybe<T> observeOn(final Scheduler scheduler) { ObjectHelper.requireNonNull(scheduler, "scheduler is null"); return RxJavaPlugins.onAssembly(new MaybeObserveOn<T>(this, scheduler)); } /** * Filters the items emitted by a Maybe, only emitting its success value if that * is an instance of the supplied Class. * <p> * <img width="640" height="310" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/ofClass.png" alt=""> * <dl> * <dt><b>Scheduler:</b></dt> * <dd>{@code ofType} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * * @param <U> the output type * @param clazz * the class type to filter the items emitted by the source Maybe * @return the new Maybe instance * @see <a href="http://reactivex.io/documentation/operators/filter.html">ReactiveX operators documentation: Filter</a> */ @CheckReturnValue @NonNull @SchedulerSupport(SchedulerSupport.NONE) public final <U> Maybe<U> ofType(final Class<U> clazz) { ObjectHelper.requireNonNull(clazz, "clazz is null"); return filter(Functions.isInstanceOf(clazz)).cast(clazz); } /** * Calls the specified converter function with the current Maybe instance * during assembly time and returns its result. 
* <dl> * <dt><b>Scheduler:</b></dt> * <dd>{@code to} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * @param <R> the result type * @param convert the function that is called with the current Maybe instance during * assembly time that should return some value to be the result * * @return the value returned by the convert function */ @CheckReturnValue @NonNull @SchedulerSupport(SchedulerSupport.NONE) public final <R> R to(Function<? super Maybe<T>, R> convert) { try { return ObjectHelper.requireNonNull(convert, "convert is null").apply(this); } catch (Throwable ex) { Exceptions.throwIfFatal(ex); throw ExceptionHelper.wrapOrThrow(ex); } } /** * Converts this Maybe into a backpressure-aware Flowable instance composing cancellation * through. * <dl> * <dt><b>Backpressure:</b></dt> * <dd>The returned Flowable honors the backpressure of the downstream.</dd> * <dt><b>Scheduler:</b></dt> * <dd>{@code toFlowable} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * @return the new Flowable instance */ @SuppressWarnings("unchecked") @BackpressureSupport(BackpressureKind.FULL) @CheckReturnValue @SchedulerSupport(SchedulerSupport.NONE) public final Flowable<T> toFlowable() { if (this instanceof FuseToFlowable) { return ((FuseToFlowable<T>)this).fuseToFlowable(); } return RxJavaPlugins.onAssembly(new MaybeToFlowable<T>(this)); } /** * Converts this Maybe into an Observable instance composing disposal * through. 
* <dl> * <dt><b>Scheduler:</b></dt> * <dd>{@code toObservable} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * @return the new Observable instance */ @SuppressWarnings("unchecked") @CheckReturnValue @SchedulerSupport(SchedulerSupport.NONE) public final Observable<T> toObservable() { if (this instanceof FuseToObservable) { return ((FuseToObservable<T>)this).fuseToObservable(); } return RxJavaPlugins.onAssembly(new MaybeToObservable<T>(this)); } /** * Converts this Maybe into a Single instance composing disposal * through and turning an empty Maybe into a Single that emits the given * value through onSuccess. * <dl> * <dt><b>Scheduler:</b></dt> * <dd>{@code toSingle} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * @param defaultValue the default item to signal in Single if this Maybe is empty * @return the new Single instance */ @CheckReturnValue @NonNull @SchedulerSupport(SchedulerSupport.NONE) public final Single<T> toSingle(T defaultValue) { ObjectHelper.requireNonNull(defaultValue, "defaultValue is null"); return RxJavaPlugins.onAssembly(new MaybeToSingle<T>(this, defaultValue)); } /** * Converts this Maybe into a Single instance composing disposal * through and turning an empty Maybe into a signal of NoSuchElementException. * <dl> * <dt><b>Scheduler:</b></dt> * <dd>{@code toSingle} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * @return the new Single instance */ @CheckReturnValue @SchedulerSupport(SchedulerSupport.NONE) public final Single<T> toSingle() { return RxJavaPlugins.onAssembly(new MaybeToSingle<T>(this, null)); } /** * Returns a Maybe instance that if this Maybe emits an error, it will emit an onComplete * and swallow the throwable. 
* <dl> * <dt><b>Scheduler:</b></dt> * <dd>{@code onErrorComplete} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * @return the new Maybe instance */ @CheckReturnValue @SchedulerSupport(SchedulerSupport.NONE) public final Maybe<T> onErrorComplete() { return onErrorComplete(Functions.alwaysTrue()); } /** * Returns a Maybe instance that if this Maybe emits an error and the predicate returns * true, it will emit an onComplete and swallow the throwable. * <dl> * <dt><b>Scheduler:</b></dt> * <dd>{@code onErrorComplete} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * @param predicate the predicate to call when an Throwable is emitted which should return true * if the Throwable should be swallowed and replaced with an onComplete. * @return the new Maybe instance */ @CheckReturnValue @NonNull @SchedulerSupport(SchedulerSupport.NONE) public final Maybe<T> onErrorComplete(final Predicate<? super Throwable> predicate) { ObjectHelper.requireNonNull(predicate, "predicate is null"); return RxJavaPlugins.onAssembly(new MaybeOnErrorComplete<T>(this, predicate)); } /** * Instructs a Maybe to pass control to another {@link MaybeSource} rather than invoking * {@link MaybeObserver#onError onError} if it encounters an error. * <p> * <img width="640" height="310" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/onErrorResumeNext.png" alt=""> * <p> * You can use this to prevent errors from propagating or to supply fallback data should errors be * encountered. 
* <dl> * <dt><b>Scheduler:</b></dt> * <dd>{@code onErrorResumeNext} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * * @param next * the next {@code MaybeSource} that will take over if the source Maybe encounters * an error * @return the new Maybe instance * @see <a href="http://reactivex.io/documentation/operators/catch.html">ReactiveX operators documentation: Catch</a> */ @CheckReturnValue @NonNull @SchedulerSupport(SchedulerSupport.NONE) public final Maybe<T> onErrorResumeNext(final MaybeSource<? extends T> next) { ObjectHelper.requireNonNull(next, "next is null"); return onErrorResumeNext(Functions.justFunction(next)); } /** * Instructs a Maybe to pass control to another Maybe rather than invoking * {@link MaybeObserver#onError onError} if it encounters an error. * <p> * <img width="640" height="310" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/onErrorResumeNext.png" alt=""> * <p> * You can use this to prevent errors from propagating or to supply fallback data should errors be * encountered. * <dl> * <dt><b>Scheduler:</b></dt> * <dd>{@code onErrorResumeNext} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * * @param resumeFunction * a function that returns a MaybeSource that will take over if the source Maybe encounters * an error * @return the new Maybe instance * @see <a href="http://reactivex.io/documentation/operators/catch.html">ReactiveX operators documentation: Catch</a> */ @CheckReturnValue @NonNull @SchedulerSupport(SchedulerSupport.NONE) public final Maybe<T> onErrorResumeNext(Function<? super Throwable, ? extends MaybeSource<? 
extends T>> resumeFunction) { ObjectHelper.requireNonNull(resumeFunction, "resumeFunction is null"); return RxJavaPlugins.onAssembly(new MaybeOnErrorNext<T>(this, resumeFunction, true)); } /** * Instructs a Maybe to emit an item (returned by a specified function) rather than invoking * {@link MaybeObserver#onError onError} if it encounters an error. * <p> * <img width="640" height="310" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/onErrorReturn.png" alt=""> * <p> * You can use this to prevent errors from propagating or to supply fallback data should errors be * encountered. * <dl> * <dt><b>Scheduler:</b></dt> * <dd>{@code onErrorReturn} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * * @param valueSupplier * a function that returns a single value that will be emitted as success value * the current Maybe signals an onError event * @return the new Maybe instance * @see <a href="http://reactivex.io/documentation/operators/catch.html">ReactiveX operators documentation: Catch</a> */ @CheckReturnValue @NonNull @SchedulerSupport(SchedulerSupport.NONE) public final Maybe<T> onErrorReturn(Function<? super Throwable, ? extends T> valueSupplier) { ObjectHelper.requireNonNull(valueSupplier, "valueSupplier is null"); return RxJavaPlugins.onAssembly(new MaybeOnErrorReturn<T>(this, valueSupplier)); } /** * Instructs a Maybe to emit an item (returned by a specified function) rather than invoking * {@link MaybeObserver#onError onError} if it encounters an error. * <p> * <img width="640" height="310" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/onErrorReturn.png" alt=""> * <p> * You can use this to prevent errors from propagating or to supply fallback data should errors be * encountered. 
* <dl> * <dt><b>Scheduler:</b></dt> * <dd>{@code onErrorReturnItem} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * * @param item * the value that is emitted as onSuccess in case this Maybe signals an onError * @return the new Maybe instance * @see <a href="http://reactivex.io/documentation/operators/catch.html">ReactiveX operators documentation: Catch</a> */ @CheckReturnValue @NonNull @SchedulerSupport(SchedulerSupport.NONE) public final Maybe<T> onErrorReturnItem(final T item) { ObjectHelper.requireNonNull(item, "item is null"); return onErrorReturn(Functions.justFunction(item)); } /** * Instructs a Maybe to pass control to another MaybeSource rather than invoking * {@link MaybeObserver#onError onError} if it encounters an {@link java.lang.Exception}. * <p> * This differs from {@link #onErrorResumeNext} in that this one does not handle {@link java.lang.Throwable} * or {@link java.lang.Error} but lets those continue through. * <p> * <img width="640" height="333" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/onExceptionResumeNextViaMaybe.png" alt=""> * <p> * You can use this to prevent exceptions from propagating or to supply fallback data should exceptions be * encountered. * <dl> * <dt><b>Scheduler:</b></dt> * <dd>{@code onExceptionResumeNext} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * * @param next * the next MaybeSource that will take over if the source Maybe encounters * an exception * @return the new Maybe instance * @see <a href="http://reactivex.io/documentation/operators/catch.html">ReactiveX operators documentation: Catch</a> */ @CheckReturnValue @NonNull @SchedulerSupport(SchedulerSupport.NONE) public final Maybe<T> onExceptionResumeNext(final MaybeSource<? 
extends T> next) { ObjectHelper.requireNonNull(next, "next is null"); return RxJavaPlugins.onAssembly(new MaybeOnErrorNext<T>(this, Functions.justFunction(next), false)); } /** * Nulls out references to the upstream producer and downstream MaybeObserver if * the sequence is terminated or downstream calls dispose(). * <dl> * <dt><b>Scheduler:</b></dt> * <dd>{@code onTerminateDetach} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * @return a Maybe which nulls out references to the upstream producer and downstream MaybeObserver if * the sequence is terminated or downstream calls dispose() */ @CheckReturnValue @SchedulerSupport(SchedulerSupport.NONE) public final Maybe<T> onTerminateDetach() { return RxJavaPlugins.onAssembly(new MaybeDetach<T>(this)); } /** * Returns a Flowable that repeats the sequence of items emitted by the source Maybe indefinitely. * <p> * <img width="640" height="309" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/repeat.o.png" alt=""> * <dl> * <dt><b>Backpressure:</b></dt> * <dd>The operator honors downstream backpressure.</dd> * <dt><b>Scheduler:</b></dt> * <dd>{@code repeat} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * * @return a Flowable that emits the items emitted by the source Maybe repeatedly and in sequence * @see <a href="http://reactivex.io/documentation/operators/repeat.html">ReactiveX operators documentation: Repeat</a> */ @BackpressureSupport(BackpressureKind.FULL) @CheckReturnValue @SchedulerSupport(SchedulerSupport.NONE) public final Flowable<T> repeat() { return repeat(Long.MAX_VALUE); } /** * Returns a Flowable that repeats the sequence of items emitted by the source Maybe at most * {@code count} times. 
* <p> * <img width="640" height="310" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/repeat.on.png" alt=""> * <dl> * <dt><b>Backpressure:</b></dt> * <dd>This operator honors downstream backpressure.</dd> * <dt><b>Scheduler:</b></dt> * <dd>{@code repeat} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * * @param times * the number of times the source Maybe items are repeated, a count of 0 will yield an empty * sequence * @return a Flowable that repeats the sequence of items emitted by the source Maybe at most * {@code count} times * @throws IllegalArgumentException * if {@code count} is less than zero * @see <a href="http://reactivex.io/documentation/operators/repeat.html">ReactiveX operators documentation: Repeat</a> */ @BackpressureSupport(BackpressureKind.FULL) @CheckReturnValue @SchedulerSupport(SchedulerSupport.NONE) public final Flowable<T> repeat(long times) { return toFlowable().repeat(times); } /** * Returns a Flowable that repeats the sequence of items emitted by the source Maybe until * the provided stop function returns true. 
* <p> * <img width="640" height="310" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/repeat.on.png" alt=""> * <dl> * <dt><b>Backpressure:</b></dt> * <dd>This operator honors downstream backpressure.</dd> * <dt><b>Scheduler:</b></dt> * <dd>{@code repeatUntil} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * * @param stop * a boolean supplier that is called when the current Flowable completes and unless it returns * false, the current Flowable is resubscribed * @return the new Flowable instance * @throws NullPointerException * if {@code stop} is null * @see <a href="http://reactivex.io/documentation/operators/repeat.html">ReactiveX operators documentation: Repeat</a> */ @BackpressureSupport(BackpressureKind.FULL) @CheckReturnValue @SchedulerSupport(SchedulerSupport.NONE) public final Flowable<T> repeatUntil(BooleanSupplier stop) { return toFlowable().repeatUntil(stop); } /** * Returns a Flowable that emits the same values as the source Publisher with the exception of an * {@code onComplete}. An {@code onComplete} notification from the source will result in the emission of * a {@code void} item to the Publisher provided as an argument to the {@code notificationHandler} * function. If that Publisher calls {@code onComplete} or {@code onError} then {@code repeatWhen} will * call {@code onComplete} or {@code onError} on the child subscription. Otherwise, this Publisher will * resubscribe to the source Publisher. * <p> * <img width="640" height="430" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/repeatWhen.f.png" alt=""> * <dl> * <dt><b>Backpressure:</b></dt> * <dd>The operator honors downstream backpressure and expects the source {@code Publisher} to honor backpressure as well. 
* If this expectation is violated, the operator <em>may</em> throw an {@code IllegalStateException}.</dd> * <dt><b>Scheduler:</b></dt> * <dd>{@code repeatWhen} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * * @param handler * receives a Publisher of notifications with which a user can complete or error, aborting the repeat. * @return the source Publisher modified with repeat logic * @see <a href="http://reactivex.io/documentation/operators/repeat.html">ReactiveX operators documentation: Repeat</a> */ @BackpressureSupport(BackpressureKind.FULL) @CheckReturnValue @SchedulerSupport(SchedulerSupport.NONE) public final Flowable<T> repeatWhen(final Function<? super Flowable<Object>, ? extends Publisher<?>> handler) { return toFlowable().repeatWhen(handler); } /** * Returns a Maybe that mirrors the source Maybe, resubscribing to it if it calls {@code onError} * (infinite retry count). * <p> * <img width="640" height="315" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/retry.png" alt=""> * <p> * If the source Maybe calls {@link MaybeObserver#onError}, this method will resubscribe to the source * Maybe rather than propagating the {@code onError} call. * <dl> * <dt><b>Scheduler:</b></dt> * <dd>{@code retry} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * * @return the new Maybe instance * @see <a href="http://reactivex.io/documentation/operators/retry.html">ReactiveX operators documentation: Retry</a> */ @CheckReturnValue @SchedulerSupport(SchedulerSupport.NONE) public final Maybe<T> retry() { return retry(Long.MAX_VALUE, Functions.alwaysTrue()); } /** * Returns a Maybe that mirrors the source Maybe, resubscribing to it if it calls {@code onError} * and the predicate returns true for that specific exception and retry count. 
* <p> * <img width="640" height="315" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/retry.png" alt=""> * <dl> * <dt><b>Scheduler:</b></dt> * <dd>{@code retry} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * * @param predicate * the predicate that determines if a resubscription may happen in case of a specific exception * and retry count * @return the new Maybe instance * @see #retry() * @see <a href="http://reactivex.io/documentation/operators/retry.html">ReactiveX operators documentation: Retry</a> */ @CheckReturnValue @SchedulerSupport(SchedulerSupport.NONE) public final Maybe<T> retry(BiPredicate<? super Integer, ? super Throwable> predicate) { return toFlowable().retry(predicate).singleElement(); } /** * Returns a Maybe that mirrors the source Maybe, resubscribing to it if it calls {@code onError} * up to a specified number of retries. * <p> * <img width="640" height="315" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/retry.png" alt=""> * <p> * If the source Maybe calls {@link MaybeObserver#onError}, this method will resubscribe to the source * Maybe for a maximum of {@code count} resubscriptions rather than propagating the * {@code onError} call. * <dl> * <dt><b>Scheduler:</b></dt> * <dd>{@code retry} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * * @param count * the number of times to resubscribe if the current Maybe fails * @return the new Maybe instance * @see <a href="http://reactivex.io/documentation/operators/retry.html">ReactiveX operators documentation: Retry</a> */ @CheckReturnValue @SchedulerSupport(SchedulerSupport.NONE) public final Maybe<T> retry(long count) { return retry(count, Functions.alwaysTrue()); } /** * Retries at most times or until the predicate returns false, whichever happens first. 
* * <dl> * <dt><b>Scheduler:</b></dt> * <dd>{@code retry} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * @param times the number of times to resubscribe if the current Maybe fails * @param predicate the predicate called with the failure Throwable and should return true to trigger a retry. * @return the new Maybe instance */ @CheckReturnValue @SchedulerSupport(SchedulerSupport.NONE) public final Maybe<T> retry(long times, Predicate<? super Throwable> predicate) { return toFlowable().retry(times, predicate).singleElement(); } /** * Retries the current Maybe if it fails and the predicate returns true. * <dl> * <dt><b>Scheduler:</b></dt> * <dd>{@code retry} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * * @param predicate the predicate that receives the failure Throwable and should return true to trigger a retry. * @return the new Maybe instance */ @CheckReturnValue @SchedulerSupport(SchedulerSupport.NONE) public final Maybe<T> retry(Predicate<? super Throwable> predicate) { return retry(Long.MAX_VALUE, predicate); } /** * Retries until the given stop function returns true. * <dl> * <dt><b>Scheduler:</b></dt> * <dd>{@code retryUntil} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * @param stop the function that should return true to stop retrying * @return the new Maybe instance */ @CheckReturnValue @NonNull @SchedulerSupport(SchedulerSupport.NONE) public final Maybe<T> retryUntil(final BooleanSupplier stop) { ObjectHelper.requireNonNull(stop, "stop is null"); return retry(Long.MAX_VALUE, Functions.predicateReverseFor(stop)); } /** * Returns a Maybe that emits the same values as the source Maybe with the exception of an * {@code onError}. An {@code onError} notification from the source will result in the emission of a * {@link Throwable} item to the Publisher provided as an argument to the {@code notificationHandler} * function. 
If that Publisher calls {@code onComplete} or {@code onError} then {@code retry} will call * {@code onComplete} or {@code onError} on the child subscription. Otherwise, this Publisher will * resubscribe to the source Publisher. * <p> * <img width="640" height="430" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/retryWhen.f.png" alt=""> * <p> * Example: * * This retries 3 times, each time incrementing the number of seconds it waits. * * <pre><code> * Maybe.create((MaybeEmitter&lt;? super String&gt; s) -&gt; { * System.out.println("subscribing"); * s.onError(new RuntimeException("always fails")); * }, BackpressureStrategy.BUFFER).retryWhen(attempts -&gt; { * return attempts.zipWith(Publisher.range(1, 3), (n, i) -&gt; i).flatMap(i -&gt; { * System.out.println("delay retry by " + i + " second(s)"); * return Flowable.timer(i, TimeUnit.SECONDS); * }); * }).blockingForEach(System.out::println); * </code></pre> * * Output is: * * <pre> {@code * subscribing * delay retry by 1 second(s) * subscribing * delay retry by 2 second(s) * subscribing * delay retry by 3 second(s) * subscribing * } </pre> * <p> * Note that the inner {@code Publisher} returned by the handler function should signal * either {@code onNext}, {@code onError} or {@code onComplete} in response to the received * {@code Throwable} to indicate the operator should retry or terminate. If the upstream to * the operator is asynchronous, signalling onNext followed by onComplete immediately may * result in the sequence to be completed immediately. Similarly, if this inner * {@code Publisher} signals {@code onError} or {@code onComplete} while the upstream is * active, the sequence is terminated with the same signal immediately. 
* <p> * The following example demonstrates how to retry an asynchronous source with a delay: * <pre><code> * Maybe.timer(1, TimeUnit.SECONDS) * .doOnSubscribe(s -&gt; System.out.println("subscribing")) * .map(v -&gt; { throw new RuntimeException(); }) * .retryWhen(errors -&gt; { * AtomicInteger counter = new AtomicInteger(); * return errors * .takeWhile(e -&gt; counter.getAndIncrement() != 3) * .flatMap(e -&gt; { * System.out.println("delay retry by " + counter.get() + " second(s)"); * return Flowable.timer(counter.get(), TimeUnit.SECONDS); * }); * }) * .blockingGet(); * </code></pre> * <dl> * <dt><b>Scheduler:</b></dt> * <dd>{@code retryWhen} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * * @param handler * receives a Publisher of notifications with which a user can complete or error, aborting the * retry * @return the new Maybe instance * @see <a href="http://reactivex.io/documentation/operators/retry.html">ReactiveX operators documentation: Retry</a> */ @CheckReturnValue @SchedulerSupport(SchedulerSupport.NONE) public final Maybe<T> retryWhen( final Function<? super Flowable<Throwable>, ? extends Publisher<?>> handler) { return toFlowable().retryWhen(handler).singleElement(); } /** * Subscribes to a Maybe and ignores {@code onSuccess} and {@code onComplete} emissions. * <p> * If the Maybe emits an error, it is wrapped into an * {@link io.reactivex.exceptions.OnErrorNotImplementedException OnErrorNotImplementedException} * and routed to the RxJavaPlugins.onError handler. 
* <dl> * <dt><b>Scheduler:</b></dt> * <dd>{@code subscribe} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * * @return a {@link Disposable} reference with which the caller can stop receiving items before * the Maybe has finished sending them * @see <a href="http://reactivex.io/documentation/operators/subscribe.html">ReactiveX operators documentation: Subscribe</a> */ @SchedulerSupport(SchedulerSupport.NONE) public final Disposable subscribe() { return subscribe(Functions.emptyConsumer(), Functions.ON_ERROR_MISSING, Functions.EMPTY_ACTION); } /** * Subscribes to a Maybe and provides a callback to handle the items it emits. * <p> * If the Maybe emits an error, it is wrapped into an * {@link io.reactivex.exceptions.OnErrorNotImplementedException OnErrorNotImplementedException} * and routed to the RxJavaPlugins.onError handler. * <dl> * <dt><b>Scheduler:</b></dt> * <dd>{@code subscribe} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * * @param onSuccess * the {@code Consumer<T>} you have designed to accept a success value from the Maybe * @return a {@link Disposable} reference with which the caller can stop receiving items before * the Maybe has finished sending them * @throws NullPointerException * if {@code onSuccess} is null * @see <a href="http://reactivex.io/documentation/operators/subscribe.html">ReactiveX operators documentation: Subscribe</a> */ @CheckReturnValue @SchedulerSupport(SchedulerSupport.NONE) public final Disposable subscribe(Consumer<? super T> onSuccess) { return subscribe(onSuccess, Functions.ON_ERROR_MISSING, Functions.EMPTY_ACTION); } /** * Subscribes to a Maybe and provides callbacks to handle the items it emits and any error * notification it issues. 
* <dl> * <dt><b>Scheduler:</b></dt> * <dd>{@code subscribe} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * * @param onSuccess * the {@code Consumer<T>} you have designed to accept a success value from the Maybe * @param onError * the {@code Consumer<Throwable>} you have designed to accept any error notification from the * Maybe * @return a {@link Disposable} reference with which the caller can stop receiving items before * the Maybe has finished sending them * @see <a href="http://reactivex.io/documentation/operators/subscribe.html">ReactiveX operators documentation: Subscribe</a> * @throws NullPointerException * if {@code onSuccess} is null, or * if {@code onError} is null */ @CheckReturnValue @SchedulerSupport(SchedulerSupport.NONE) public final Disposable subscribe(Consumer<? super T> onSuccess, Consumer<? super Throwable> onError) { return subscribe(onSuccess, onError, Functions.EMPTY_ACTION); } /** * Subscribes to a Maybe and provides callbacks to handle the items it emits and any error or * completion notification it issues. 
* <dl> * <dt><b>Scheduler:</b></dt> * <dd>{@code subscribe} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * * @param onSuccess * the {@code Consumer<T>} you have designed to accept a success value from the Maybe * @param onError * the {@code Consumer<Throwable>} you have designed to accept any error notification from the * Maybe * @param onComplete * the {@code Action} you have designed to accept a completion notification from the * Maybe * @return a {@link Disposable} reference with which the caller can stop receiving items before * the Maybe has finished sending them * @throws NullPointerException * if {@code onSuccess} is null, or * if {@code onError} is null, or * if {@code onComplete} is null * @see <a href="http://reactivex.io/documentation/operators/subscribe.html">ReactiveX operators documentation: Subscribe</a> */ @CheckReturnValue @NonNull @SchedulerSupport(SchedulerSupport.NONE) public final Disposable subscribe(Consumer<? super T> onSuccess, Consumer<? super Throwable> onError, Action onComplete) { ObjectHelper.requireNonNull(onSuccess, "onSuccess is null"); ObjectHelper.requireNonNull(onError, "onError is null"); ObjectHelper.requireNonNull(onComplete, "onComplete is null"); return subscribeWith(new MaybeCallbackObserver<T>(onSuccess, onError, onComplete)); } @SchedulerSupport(SchedulerSupport.NONE) @Override public final void subscribe(MaybeObserver<? super T> observer) { ObjectHelper.requireNonNull(observer, "observer is null"); observer = RxJavaPlugins.onSubscribe(this, observer); ObjectHelper.requireNonNull(observer, "The RxJavaPlugins.onSubscribe hook returned a null MaybeObserver. Please check the handler provided to RxJavaPlugins.setOnMaybeSubscribe for invalid null returns. 
Further reading: https://github.com/ReactiveX/RxJava/wiki/Plugins"); try { subscribeActual(observer); } catch (NullPointerException ex) { throw ex; } catch (Throwable ex) { Exceptions.throwIfFatal(ex); NullPointerException npe = new NullPointerException("subscribeActual failed"); npe.initCause(ex); throw npe; } } /** * Implement this method in subclasses to handle the incoming {@link MaybeObserver}s. * <p>There is no need to call any of the plugin hooks on the current {@code Maybe} instance or * the {@code MaybeObserver}; all hooks and basic safeguards have been * applied by {@link #subscribe(MaybeObserver)} before this method gets called. * @param observer the MaybeObserver to handle, not null */ protected abstract void subscribeActual(MaybeObserver<? super T> observer); /** * Asynchronously subscribes subscribers to this Maybe on the specified {@link Scheduler}. * <p> * <img width="640" height="752" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/Maybe.subscribeOn.png" alt=""> * <dl> * <dt><b>Scheduler:</b></dt> * <dd>you specify which {@link Scheduler} this operator will use.</dd> * </dl> * * @param scheduler * the {@link Scheduler} to perform subscription actions on * @return the new Maybe instance that its subscriptions happen on the specified {@link Scheduler} * @see <a href="http://reactivex.io/documentation/operators/subscribeon.html">ReactiveX operators documentation: SubscribeOn</a> * @see <a href="http://www.grahamlea.com/2014/07/rxjava-threading-examples/">RxJava Threading Examples</a> * @see #observeOn */ @CheckReturnValue @NonNull @SchedulerSupport(SchedulerSupport.CUSTOM) public final Maybe<T> subscribeOn(Scheduler scheduler) { ObjectHelper.requireNonNull(scheduler, "scheduler is null"); return RxJavaPlugins.onAssembly(new MaybeSubscribeOn<T>(this, scheduler)); } /** * Subscribes a given MaybeObserver (subclass) to this Maybe and returns the given * MaybeObserver as is. 
* <p>Usage example: * <pre><code> * Maybe&lt;Integer&gt; source = Maybe.just(1); * CompositeDisposable composite = new CompositeDisposable(); * * DisposableMaybeObserver&lt;Integer&gt; ds = new DisposableMaybeObserver&lt;&gt;() { * // ... * }; * * composite.add(source.subscribeWith(ds)); * </code></pre> * <dl> * <dt><b>Scheduler:</b></dt> * <dd>{@code subscribeWith} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * @param <E> the type of the MaybeObserver to use and return * @param observer the MaybeObserver (subclass) to use and return, not null * @return the input {@code subscriber} * @throws NullPointerException if {@code subscriber} is null */ @CheckReturnValue @SchedulerSupport(SchedulerSupport.NONE) public final <E extends MaybeObserver<? super T>> E subscribeWith(E observer) { subscribe(observer); return observer; } /** * Returns a Maybe that emits the items emitted by the source Maybe or the items of an alternate * MaybeSource if the current Maybe is empty. * <p> * <img width="640" height="445" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/switchifempty.m.png" alt=""> * <dl> * <dt><b>Scheduler:</b></dt> * <dd>{@code switchIfEmpty} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * * @param other * the alternate MaybeSource to subscribe to if the main does not emit any items * @return a Maybe that emits the items emitted by the source Maybe or the items of an * alternate MaybeSource if the source Maybe is empty. */ @CheckReturnValue @NonNull @SchedulerSupport(SchedulerSupport.NONE) public final Maybe<T> switchIfEmpty(MaybeSource<? extends T> other) { ObjectHelper.requireNonNull(other, "other is null"); return RxJavaPlugins.onAssembly(new MaybeSwitchIfEmpty<T>(this, other)); } /** * Returns a Single that emits the items emitted by the source Maybe or the item of an alternate * SingleSource if the current Maybe is empty. 
* <p> * <img width="640" height="445" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/switchifempty.m.png" alt=""> * <dl> * <dt><b>Scheduler:</b></dt> * <dd>{@code switchIfEmpty} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * <p>History: 2.1.4 - experimental * @param other * the alternate SingleSource to subscribe to if the main does not emit any items * @return a Single that emits the items emitted by the source Maybe or the item of an * alternate SingleSource if the source Maybe is empty. * @since 2.2 */ @CheckReturnValue @NonNull @SchedulerSupport(SchedulerSupport.NONE) public final Single<T> switchIfEmpty(SingleSource<? extends T> other) { ObjectHelper.requireNonNull(other, "other is null"); return RxJavaPlugins.onAssembly(new MaybeSwitchIfEmptySingle<T>(this, other)); } /** * Returns a Maybe that emits the items emitted by the source Maybe until a second MaybeSource * emits an item. * <p> * <img width="640" height="380" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/takeUntil.png" alt=""> * <dl> * <dt><b>Scheduler:</b></dt> * <dd>{@code takeUntil} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * * @param other * the MaybeSource whose first emitted item will cause {@code takeUntil} to stop emitting items * from the source Maybe * @param <U> * the type of items emitted by {@code other} * @return a Maybe that emits the items emitted by the source Maybe until such time as {@code other} emits its first item * @see <a href="http://reactivex.io/documentation/operators/takeuntil.html">ReactiveX operators documentation: TakeUntil</a> */ @CheckReturnValue @NonNull @SchedulerSupport(SchedulerSupport.NONE) public final <U> Maybe<T> takeUntil(MaybeSource<U> other) { ObjectHelper.requireNonNull(other, "other is null"); return RxJavaPlugins.onAssembly(new MaybeTakeUntilMaybe<T, U>(this, other)); } /** * Returns a Maybe that emits the item emitted by the source Maybe until 
a second Publisher * emits an item. * <p> * <img width="640" height="380" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/takeUntil.png" alt=""> * <dl> * <dt><b>Backpressure:</b></dt> * <dd>The {@code Publisher} is consumed in an unbounded fashion and is cancelled after the first item * emitted.</dd> * <dt><b>Scheduler:</b></dt> * <dd>{@code takeUntil} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * * @param other * the Publisher whose first emitted item will cause {@code takeUntil} to stop emitting items * from the source Publisher * @param <U> * the type of items emitted by {@code other} * @return a Maybe that emits the items emitted by the source Maybe until such time as {@code other} emits its first item * @see <a href="http://reactivex.io/documentation/operators/takeuntil.html">ReactiveX operators documentation: TakeUntil</a> */ @BackpressureSupport(BackpressureKind.UNBOUNDED_IN) @CheckReturnValue @NonNull @SchedulerSupport(SchedulerSupport.NONE) public final <U> Maybe<T> takeUntil(Publisher<U> other) { ObjectHelper.requireNonNull(other, "other is null"); return RxJavaPlugins.onAssembly(new MaybeTakeUntilPublisher<T, U>(this, other)); } /** * Returns a Maybe that mirrors the source Maybe but applies a timeout policy for each emitted * item. If the next item isn't emitted within the specified timeout duration starting from its predecessor, * the resulting Maybe terminates and notifies MaybeObservers of a {@code TimeoutException}. * <p> * <img width="640" height="305" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/timeout.1.png" alt=""> * <dl> * <dt><b>Scheduler:</b></dt> * <dd>This version of {@code timeout} operates by default on the {@code computation} {@link Scheduler}.</dd> * </dl> * * @param timeout * maximum duration between emitted items before a timeout occurs * @param timeUnit * the unit of time that applies to the {@code timeout} argument. 
* @return the new Maybe instance * @see <a href="http://reactivex.io/documentation/operators/timeout.html">ReactiveX operators documentation: Timeout</a> */ @CheckReturnValue @SchedulerSupport(SchedulerSupport.COMPUTATION) public final Maybe<T> timeout(long timeout, TimeUnit timeUnit) { return timeout(timeout, timeUnit, Schedulers.computation()); } /** * Returns a Maybe that mirrors the source Maybe but applies a timeout policy for each emitted * item. If the next item isn't emitted within the specified timeout duration starting from its predecessor, * the source MaybeSource is disposed and resulting Maybe begins instead to mirror a fallback MaybeSource. * <p> * <img width="640" height="305" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/timeout.2.png" alt=""> * <dl> * <dt><b>Scheduler:</b></dt> * <dd>This version of {@code timeout} operates by default on the {@code computation} {@link Scheduler}.</dd> * </dl> * * @param timeout * maximum duration between items before a timeout occurs * @param timeUnit * the unit of time that applies to the {@code timeout} argument * @param fallback * the fallback MaybeSource to use in case of a timeout * @return the new Maybe instance * @see <a href="http://reactivex.io/documentation/operators/timeout.html">ReactiveX operators documentation: Timeout</a> */ @CheckReturnValue @NonNull @SchedulerSupport(SchedulerSupport.COMPUTATION) public final Maybe<T> timeout(long timeout, TimeUnit timeUnit, MaybeSource<? extends T> fallback) { ObjectHelper.requireNonNull(fallback, "fallback is null"); return timeout(timeout, timeUnit, Schedulers.computation(), fallback); } /** * Returns a Maybe that mirrors the source Maybe but applies a timeout policy for each emitted * item using a specified Scheduler. If the next item isn't emitted within the specified timeout duration * starting from its predecessor, the source MaybeSource is disposed and resulting Maybe begins instead * to mirror a fallback MaybeSource. 
* <p> * <img width="640" height="305" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/timeout.2s.png" alt=""> * <dl> * <dt><b>Scheduler:</b></dt> * <dd>You specify which {@link Scheduler} this operator will use.</dd> * </dl> * * @param timeout * maximum duration between items before a timeout occurs * @param timeUnit * the unit of time that applies to the {@code timeout} argument * @param fallback * the MaybeSource to use as the fallback in case of a timeout * @param scheduler * the {@link Scheduler} to run the timeout timers on * @return the new Maybe instance * @see <a href="http://reactivex.io/documentation/operators/timeout.html">ReactiveX operators documentation: Timeout</a> */ @CheckReturnValue @NonNull @SchedulerSupport(SchedulerSupport.CUSTOM) public final Maybe<T> timeout(long timeout, TimeUnit timeUnit, Scheduler scheduler, MaybeSource<? extends T> fallback) { ObjectHelper.requireNonNull(fallback, "fallback is null"); return timeout(timer(timeout, timeUnit, scheduler), fallback); } /** * Returns a Maybe that mirrors the source Maybe but applies a timeout policy for each emitted * item, where this policy is governed on a specified Scheduler. If the next item isn't emitted within the * specified timeout duration starting from its predecessor, the resulting Maybe terminates and * notifies MaybeObservers of a {@code TimeoutException}. 
* <p> * <img width="640" height="305" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/timeout.1s.png" alt=""> * <dl> * <dt><b>Scheduler:</b></dt> * <dd>You specify which {@link Scheduler} this operator will use.</dd> * </dl> * * @param timeout * maximum duration between items before a timeout occurs * @param timeUnit * the unit of time that applies to the {@code timeout} argument * @param scheduler * the Scheduler to run the timeout timers on * @return the new Maybe instance * @see <a href="http://reactivex.io/documentation/operators/timeout.html">ReactiveX operators documentation: Timeout</a> */ @CheckReturnValue @SchedulerSupport(SchedulerSupport.CUSTOM) public final Maybe<T> timeout(long timeout, TimeUnit timeUnit, Scheduler scheduler) { return timeout(timer(timeout, timeUnit, scheduler)); } /** * If the current {@code Maybe} didn't signal an event before the {@code timeoutIndicator} {@link MaybeSource} signals, a * {@link TimeoutException} is signaled instead. * <dl> * <dt><b>Scheduler:</b></dt> * <dd>{@code timeout} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * @param <U> the value type of the * @param timeoutIndicator the {@code MaybeSource} that indicates the timeout by signaling onSuccess * or onComplete. * @return the new Maybe instance */ @CheckReturnValue @NonNull @SchedulerSupport(SchedulerSupport.NONE) public final <U> Maybe<T> timeout(MaybeSource<U> timeoutIndicator) { ObjectHelper.requireNonNull(timeoutIndicator, "timeoutIndicator is null"); return RxJavaPlugins.onAssembly(new MaybeTimeoutMaybe<T, U>(this, timeoutIndicator, null)); } /** * If the current {@code Maybe} didn't signal an event before the {@code timeoutIndicator} {@link MaybeSource} signals, * the current {@code Maybe} is disposed and the {@code fallback} {@code MaybeSource} subscribed to * as a continuation. 
* <dl> * <dt><b>Scheduler:</b></dt> * <dd>{@code timeout} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * @param <U> the value type of the * @param timeoutIndicator the {@code MaybeSource} that indicates the timeout by signaling {@code onSuccess} * or {@code onComplete}. * @param fallback the {@code MaybeSource} that is subscribed to if the current {@code Maybe} times out * @return the new Maybe instance */ @CheckReturnValue @NonNull @SchedulerSupport(SchedulerSupport.NONE) public final <U> Maybe<T> timeout(MaybeSource<U> timeoutIndicator, MaybeSource<? extends T> fallback) { ObjectHelper.requireNonNull(timeoutIndicator, "timeoutIndicator is null"); ObjectHelper.requireNonNull(fallback, "fallback is null"); return RxJavaPlugins.onAssembly(new MaybeTimeoutMaybe<T, U>(this, timeoutIndicator, fallback)); } /** * If the current {@code Maybe} source didn't signal an event before the {@code timeoutIndicator} {@link Publisher} signals, a * {@link TimeoutException} is signaled instead. * <dl> * <dt><b>Backpressure:</b></dt> * <dd>The {@code timeoutIndicator} {@link Publisher} is consumed in an unbounded manner and * is cancelled after its first item.</dd> * <dt><b>Scheduler:</b></dt> * <dd>{@code timeout} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * @param <U> the value type of the * @param timeoutIndicator the {@code MaybeSource} that indicates the timeout by signaling {@code onSuccess} * or {@code onComplete}. 
* @return the new Maybe instance */ @BackpressureSupport(BackpressureKind.UNBOUNDED_IN) @CheckReturnValue @NonNull @SchedulerSupport(SchedulerSupport.NONE) public final <U> Maybe<T> timeout(Publisher<U> timeoutIndicator) { ObjectHelper.requireNonNull(timeoutIndicator, "timeoutIndicator is null"); return RxJavaPlugins.onAssembly(new MaybeTimeoutPublisher<T, U>(this, timeoutIndicator, null)); } /** * If the current {@code Maybe} didn't signal an event before the {@code timeoutIndicator} {@link Publisher} signals, * the current {@code Maybe} is disposed and the {@code fallback} {@code MaybeSource} subscribed to * as a continuation. * <dl> * <dt><b>Backpressure:</b></dt> * <dd>The {@code timeoutIndicator} {@link Publisher} is consumed in an unbounded manner and * is cancelled after its first item.</dd> * <dt><b>Scheduler:</b></dt> * <dd>{@code timeout} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * @param <U> the value type of the * @param timeoutIndicator the {@code MaybeSource} that indicates the timeout by signaling {@code onSuccess} * or {@code onComplete} * @param fallback the {@code MaybeSource} that is subscribed to if the current {@code Maybe} times out * @return the new Maybe instance */ @BackpressureSupport(BackpressureKind.UNBOUNDED_IN) @CheckReturnValue @NonNull @SchedulerSupport(SchedulerSupport.NONE) public final <U> Maybe<T> timeout(Publisher<U> timeoutIndicator, MaybeSource<? extends T> fallback) { ObjectHelper.requireNonNull(timeoutIndicator, "timeoutIndicator is null"); ObjectHelper.requireNonNull(fallback, "fallback is null"); return RxJavaPlugins.onAssembly(new MaybeTimeoutPublisher<T, U>(this, timeoutIndicator, fallback)); } /** * Returns a Maybe which makes sure when a MaybeObserver disposes the Disposable, * that call is propagated up on the specified scheduler. 
* <dl> * <dt><b>Scheduler:</b></dt> * <dd>{@code unsubscribeOn} calls dispose() of the upstream on the {@link Scheduler} you specify.</dd> * </dl> * @param scheduler the target scheduler where to execute the disposal * @return the new Maybe instance * @throws NullPointerException if scheduler is null */ @CheckReturnValue @NonNull @SchedulerSupport(SchedulerSupport.CUSTOM) public final Maybe<T> unsubscribeOn(final Scheduler scheduler) { ObjectHelper.requireNonNull(scheduler, "scheduler is null"); return RxJavaPlugins.onAssembly(new MaybeUnsubscribeOn<T>(this, scheduler)); } /** * Waits until this and the other MaybeSource signal a success value then applies the given BiFunction * to those values and emits the BiFunction's resulting value to downstream. * * <img width="640" height="380" src="https://raw.github.com/wiki/ReactiveX/RxJava/images/rx-operators/zip.png" alt=""> * * <p>If either this or the other MaybeSource is empty or signals an error, the resulting Maybe will * terminate immediately and dispose the other source. * * <dl> * <dt><b>Scheduler:</b></dt> * <dd>{@code zipWith} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * * @param <U> * the type of items emitted by the {@code other} MaybeSource * @param <R> * the type of items emitted by the resulting Maybe * @param other * the other MaybeSource * @param zipper * a function that combines the pairs of items from the two MaybeSources to generate the items to * be emitted by the resulting Maybe * @return the new Maybe instance * @see <a href="http://reactivex.io/documentation/operators/zip.html">ReactiveX operators documentation: Zip</a> */ @CheckReturnValue @NonNull @SchedulerSupport(SchedulerSupport.NONE) public final <U, R> Maybe<R> zipWith(MaybeSource<? extends U> other, BiFunction<? super T, ? super U, ? 
extends R> zipper) { ObjectHelper.requireNonNull(other, "other is null"); return zip(this, other, zipper); } // ------------------------------------------------------------------ // Test helper // ------------------------------------------------------------------ /** * Creates a TestObserver and subscribes * it to this Maybe. * <dl> * <dt><b>Scheduler:</b></dt> * <dd>{@code test} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * @return the new TestObserver instance */ @CheckReturnValue @SchedulerSupport(SchedulerSupport.NONE) public final TestObserver<T> test() { TestObserver<T> to = new TestObserver<T>(); subscribe(to); return to; } /** * Creates a TestObserver optionally in cancelled state, then subscribes it to this Maybe. * <dl> * <dt><b>Scheduler:</b></dt> * <dd>{@code test} does not operate by default on a particular {@link Scheduler}.</dd> * </dl> * @param cancelled if true, the TestObserver will be cancelled before subscribing to this * Maybe. * @return the new TestObserver instance */ @CheckReturnValue @SchedulerSupport(SchedulerSupport.NONE) public final TestObserver<T> test(boolean cancelled) { TestObserver<T> to = new TestObserver<T>(); if (cancelled) { to.cancel(); } subscribe(to); return to; } }
apache-2.0
mdoering/backbone
life/Plantae/Magnoliophyta/Liliopsida/Poales/Cyperaceae/Isolepis/Isolepis hystrix/README.md
188
# Isolepis hystrix (Thunb.) Nees SPECIES #### Status ACCEPTED #### According to The Catalogue of Life, 3rd January 2011 #### Published in null #### Original name null ### Remarks null
apache-2.0
akash1808/python-novaclient
novaclient/v1_1/flavors.py
6740
# Copyright 2010 Jacob Kaplan-Moss # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Flavor interface. """ from oslo_utils import strutils from six.moves.urllib import parse from novaclient import base from novaclient import exceptions from novaclient.openstack.common.gettextutils import _ from novaclient import utils class Flavor(base.Resource): """ A flavor is an available hardware configuration for a server. """ HUMAN_ID = True def __repr__(self): return "<Flavor: %s>" % self.name @property def ephemeral(self): """ Provide a user-friendly accessor to OS-FLV-EXT-DATA:ephemeral """ return self._info.get("OS-FLV-EXT-DATA:ephemeral", 'N/A') @property def is_public(self): """ Provide a user-friendly accessor to os-flavor-access:is_public """ return self._info.get("os-flavor-access:is_public", 'N/A') def get_keys(self): """ Get extra specs from a flavor. :param flavor: The :class:`Flavor` to get extra specs from """ _resp, body = self.manager.api.client.get( "/flavors/%s/os-extra_specs" % base.getid(self)) return body["extra_specs"] def set_keys(self, metadata): """ Set extra specs on a flavor. :param flavor: The :class:`Flavor` to set extra spec on :param metadata: A dict of key/value pairs to be set """ utils.validate_flavor_metadata_keys(metadata.keys()) body = {'extra_specs': metadata} return self.manager._create( "/flavors/%s/os-extra_specs" % base.getid(self), body, "extra_specs", return_raw=True) def unset_keys(self, keys): """ Unset extra specs on a flavor. 
:param flavor: The :class:`Flavor` to unset extra spec on :param keys: A list of keys to be unset """ for k in keys: self.manager._delete( "/flavors/%s/os-extra_specs/%s" % (base.getid(self), k)) def delete(self): """ Delete this flavor. """ self.manager.delete(self) class FlavorManager(base.ManagerWithFind): """ Manage :class:`Flavor` resources. """ resource_class = Flavor is_alphanum_id_allowed = True def list(self, detailed=True, is_public=True): """ Get a list of all flavors. :rtype: list of :class:`Flavor`. """ qparams = {} # is_public is ternary - None means give all flavors. # By default Nova assumes True and gives admins public flavors # and flavors from their own projects only. if not is_public: qparams['is_public'] = is_public query_string = "?%s" % parse.urlencode(qparams) if qparams else "" detail = "" if detailed: detail = "/detail" return self._list("/flavors%s%s" % (detail, query_string), "flavors") def get(self, flavor): """ Get a specific flavor. :param flavor: The ID of the :class:`Flavor` to get. :rtype: :class:`Flavor` """ return self._get("/flavors/%s" % base.getid(flavor), "flavor") def delete(self, flavor): """ Delete a specific flavor. :param flavor: The ID of the :class:`Flavor` to get. """ self._delete("/flavors/%s" % base.getid(flavor)) def _build_body(self, name, ram, vcpus, disk, id, swap, ephemeral, rxtx_factor, is_public): return { "flavor": { "name": name, "ram": ram, "vcpus": vcpus, "disk": disk, "id": id, "swap": swap, "OS-FLV-EXT-DATA:ephemeral": ephemeral, "rxtx_factor": rxtx_factor, "os-flavor-access:is_public": is_public, } } def create(self, name, ram, vcpus, disk, flavorid="auto", ephemeral=0, swap=0, rxtx_factor=1.0, is_public=True): """ Create a flavor. :param name: Descriptive name of the flavor :param ram: Memory in MB for the flavor :param vcpus: Number of VCPUs for the flavor :param disk: Size of local disk in GB :param flavorid: ID for the flavor (optional). 
You can use the reserved value ``"auto"`` to have Nova generate a UUID for the flavor in cases where you cannot simply pass ``None``. :param swap: Swap space in MB :param rxtx_factor: RX/TX factor :rtype: :class:`Flavor` """ try: ram = int(ram) except (TypeError, ValueError): raise exceptions.CommandError(_("Ram must be an integer.")) try: vcpus = int(vcpus) except (TypeError, ValueError): raise exceptions.CommandError(_("VCPUs must be an integer.")) try: disk = int(disk) except (TypeError, ValueError): raise exceptions.CommandError(_("Disk must be an integer.")) if flavorid == "auto": flavorid = None try: swap = int(swap) except (TypeError, ValueError): raise exceptions.CommandError(_("Swap must be an integer.")) try: ephemeral = int(ephemeral) except (TypeError, ValueError): raise exceptions.CommandError(_("Ephemeral must be an integer.")) try: rxtx_factor = float(rxtx_factor) except (TypeError, ValueError): raise exceptions.CommandError(_("rxtx_factor must be a float.")) try: is_public = strutils.bool_from_string(is_public, True) except Exception: raise exceptions.CommandError(_("is_public must be a boolean.")) body = self._build_body(name, ram, vcpus, disk, flavorid, swap, ephemeral, rxtx_factor, is_public) return self._create("/flavors", body, "flavor")
apache-2.0
heartbeatsjp/check_happo
commands.go
1406
package main import ( "fmt" "os" "github.com/codegangsta/cli" "github.com/heartbeatsjp/check_happo/command" "github.com/heartbeatsjp/happo-agent/halib" ) // GlobalFlags are global level options var GlobalFlags = []cli.Flag{} // Commands is list of subcommand var Commands = []cli.Command{ { Name: "monitor", Usage: "", Action: command.CmdMonitor, Flags: []cli.Flag{ cli.StringFlag{ Name: "host, H", Usage: "hostname or IP address", }, cli.IntFlag{ Name: "port, P", Value: halib.DefaultAgentPort, Usage: "Port number", }, cli.StringSliceFlag{ Name: "proxy, X", Value: &cli.StringSlice{}, Usage: "Proxy hostname[:port] (You can multiple define.)", }, cli.StringFlag{ Name: "plugin_name, p", Usage: "Plugin Name", }, cli.StringFlag{ Name: "plugin_option, o", Usage: "Plugin Option", }, cli.StringFlag{ Name: "timeout, t", Usage: "Connect Timeout", }, cli.BoolFlag{ Name: "verbose, v", Usage: "verbose output", }, }, }, { Name: "check_happo", Usage: "", Action: command.CmdTest, Flags: []cli.Flag{}, }, } // CommandNotFound implements action when subcommand not found func CommandNotFound(c *cli.Context, command string) { fmt.Fprintf(os.Stderr, "%s: '%s' is not a %s command. See '%s --help'.", c.App.Name, command, c.App.Name, c.App.Name) os.Exit(2) }
apache-2.0
JonathanSalwan/Triton
src/libtriton/includes/triton/liftingToLLVM.hpp
1827
//! \file /* ** Copyright (C) - Triton ** ** This program is under the terms of the Apache License 2.0. */ #ifndef TRITON_LIFTINGTOLLVM_HPP #define TRITON_LIFTINGTOLLVM_HPP #include <map> #include <memory> #include <ostream> #include <triton/ast.hpp> #include <triton/dllexport.hpp> #include <triton/symbolicExpression.hpp> //! The Triton namespace namespace triton { /*! * \addtogroup triton * @{ */ //! The Engines namespace namespace engines { /*! * \ingroup triton * \addtogroup engines * @{ */ //! The Lifters namespace namespace lifters { /*! * \ingroup engines * \addtogroup lifters * @{ */ //! \class LiftingToLLVM /*! \brief The lifting to LLVM class. */ class LiftingToLLVM { public: //! Constructor. TRITON_EXPORT LiftingToLLVM(); //! Lifts a symbolic expression and all its references to LLVM format. `fname` represents the name of the LLVM function. TRITON_EXPORT std::ostream& liftToLLVM(std::ostream& stream, const triton::engines::symbolic::SharedSymbolicExpression& expr, const char* fname="__triton", bool optimize=false); //! Lifts a abstract node and all its references to LLVM format. `fname` represents the name of the LLVM function. TRITON_EXPORT std::ostream& liftToLLVM(std::ostream& stream, const triton::ast::SharedAbstractNode& node, const char* fname="__triton", bool optimize=false); //! Lifts and simplify an AST using LLVM TRITON_EXPORT triton::ast::SharedAbstractNode simplifyAstViaLLVM(const triton::ast::SharedAbstractNode& node) const; }; /*! @} End of lifters namespace */ }; /*! @} End of engines namespace */ }; /*! @} End of triton namespace */ }; #endif /* TRITON_LIFTINGTOLLVM_HPP */
apache-2.0
c64scene-ar/sid-player
mamakilla/mp_sw.asm
16715
;********** PVM player *********** ; 19-08-2015 v0.01: Beat display at logo. ; v0.02: Raster bars added - Stable raster from Ricardo's code ; xx-09-2015 v0.10: VUMeter added ; 02-09-2015 v0.15: Playtime timer added ; 04-09-2015 v0.16: Code cleanup !to "mp_sw.prg" !sl "labels.txt" !source "macros.asm" ; ***** CONFIG SECTION ***** NTSC = 0 ;Compile as NTSC? 1 = true ; ***** variables ***** _charcount = $FB ;16-bit char counter _topline = $05 ;closing borders top raster _bottomline = $06 ;closing borders bottom raster _framecnt = $07 ;frameskip counter _flag = $23 ;generic flag _beat = $02 ;beat color cycle counter _vucnt = $10 ;VU Meter frame skip counter $10, $11, $12 _vunote = $13 ;VU Meter note played previous frame (for sustain effect) $13, $14, $15 _vufrm = $16 ;VU Meter current animframe $16, $17, $18 _vustat = $19 ;VU Meter state machine status $19, $1a, $1b (0 = idle, 4 = attack, 3 = decay, 2 = sustain, 1 = release) _vugate = $1c ;Gate status for previous frame $1c, $1d, $1e _tframe = $30 ;playtime frame count _tsecs = $31 ;playtime seconds _tmins = $32 ;playtime minutes _tpos = $33 ;playtime digit screen address $33,$34 ; ***** Constants ***** BEATINST = $02 ;Instrument to use for beat effect VUMETERS = $7f8 ;VU Meters sprite pointers !source "sidwizard_labels.asm" ;SID-Wizard labels ;********************** *= $0801 !zone start !word $080B,2015 !raw $9e,"2061",0,0,0 lda #$00 sta _flag sta _beat jsr initmem .xx06 bit _flag bpl .xx06 lda #$6b sta $d011 ;disable screen ;color fade ldx #$04 .xx07 lda _framecnt bne .xx07 lda #$04 sta _framecnt lda _colortable1,x sta $d020 dex bpl .xx07 ;set main irq sei lda #$fb ;this is how to tell at which rasterline we want the irq to be triggered sta $d012 inc _flag ;make music wait for us to be ready lda #<mainirq ;this is how we set up sta $fffe ;the address of our interrupt code lda #>mainirq sta $ffff cli ;--- jsr initscr ;Init screen lda #$1b sta $d011 ; enable screen dec _flag ;signal IRQ we're ready ;**** Main Loop 
**** ;Print Playtime .pp0 lda _tframe cmp #$31 ;wait for tframe reset bne .cc0 ;if not go check BEATINST lda _tsecs ldy #$04 jsr printbcd ;print seconds dey lda _tmins jsr printbcd ;print minutes ;Color cycle logo bars when BEATINST is played .cc0 ldy #$02 .cc2 lda shinst,y cmp _tinst,y ;check instrument playing changed beq .cc1 sta _tinst,y cmp #BEATINST ;changed, now check that is BEATINST bne .cc1 ; trigger color cycle lda #$03 sta _beat ; .cc1 dey bpl .cc2 bmi .pp0 .end jmp .end ;**** Print BCD **** printbcd: tax and #$0f jsr .pbcd txa lsr lsr lsr lsr .pbcd ora #$30 sta (_tpos),y dey rts ;***** White flash color cyble after initial screen close effect ***** _colortable1: !byte $00,$0b,$0c,$0f,$01 ;***** Color cycle for the beat effect ***** _colortable2: !byte $09,$08,$01 ;***** temp inst status ***** _tinst: !byte $00, $00, $00 ;***** sprite block table ***** _sustainmap: _spblock: !byte $00,$01,$01,$02,$03,$03,$04,$05,$05,$06,$07,$07,$08,$09,$09,$0a ;***** Attack frame skip ***** _attackframes: !byte $00,$00,$00,$00,$00,$00,$00,$00,$00,$01,$02,$03,$04,$0e,$18,$27 ;***** Attack sprite frame increase _attackinc: !byte $0a,$0a,$0a,$0a,$05,$04,$04,$03,$02,$02,$02,$01,$01,$01,$01,$01 ;***** Decay/Release frame skip ***** _drframes: !byte $01,$01,$01,$01,$01,$01,$00,$01,$02,$03,$06,$0b,$0e,$2c,$4a,$77 ; !byte $00,$00,$00,$00,$00,$01,$00,$01,$02,$03,$06,$0b,$0e,$2c,$4a,$77 ;***** Decay/Release sprite frame decrease ***** _drinc: !byte $01,$01,$01,$01,$01,$01,$01,$02,$02,$01,$01,$01,$01,$01,$01,$01 ; !byte $0a,$0a,$05,$03,$02,$03,$01,$02,$02,$01,$01,$01,$01,$01,$01,$01 ;********* Init Screen ********* initscr: ldx #$00 stx $D021 ;clear screen lda #$20 ldx #$00 .ic1 sta $0400,x sta $0500,x sta $0600,x sta $0700,x inx bne .ic1 ;set color ram lda #$03 ldx #$00 .ic0 sta $D900,x sta $DA00,x sta $DB00,x inx bne .ic0 ;set color for logo - 10 rows unrolled ldx #$27 ;40 columns .lx1 !set lrow = 0 !do { lda map_data+lrow,x tay lda charset_attrib_data,y sta $d800+lrow,x !set 
lrow = lrow + 40 } while lrow < 400 dex bpl .lx1 ;Init sprites lda #$d3 sta VUMETERS ;set block sta VUMETERS+1 sta VUMETERS+2 ldx #$07 stx $d01c ;set multicolor stx $d015 ;enable ldx #$05 ;colors stx $d027 stx $d028 stx $d029 ldx #$0d stx $d025 ldx #$0b stx $d026 lda #$a8 ;Y-coordinates sta $d001 sta $d003 sta $d005 ldx #$ff ;X-coordinates stx $d010 stx $d017 stx $d01d ;expand lda #$04 sta $d000 lda #$17 sta $d002 lda #$2A sta $d004 ;print data fields labels ;Changed from single routine + init for each string, to dedicated hardcoded routine for each string, shorter and easier to read. ;Name 1x2 charset +PRINT1X2 _SNameLabel, $13, 10, 11, $400, $08 ;Author 1x1 charset +PRINT1X1 _SAuthorLabel, $0d, 1, 17, $400, $0f ;Date 1x1 charset +PRINT1X1 _SDateLabel, $11, 1, 19, $400, $0f ;Playtime 1x1 charset +PRINT1X1 _SPlayLabel, $0e, 1, 15, $400, $0f ;Credits 1x1 charset +PRINT1X1 _SCodeLabel, $0d, 13, 24, $400, $0b +PRINT1X1 _SCharLabel, $11, 11, 23, $400, $0b +PRINT1X1 _SGFXLabel, $0d, 13, 22, $400, $0b ;VUMeter label ; +PRINT1X1 _SVULabel, $04, 31, 18, $400, $0f rts ; **** Configure memory, set IRQ routine **** initmem: sei ;disable maskable IRQs lda #$7f sta $dc0d ;disable timer interrupts which can be generated by the two CIA chips sta $dd0d ;the kernal uses such an interrupt to flash the cursor and scan the keyboard, so we better ;stop it. lda $dc0d ;by reading this two registers we negate any pending CIA irqs. lda $dd0d ;if we don't do this, a pending CIA irq might occur after we finish setting up our irq. ;we don't want that to happen. 
lda #COLR_CHAR_MC1 sta $d022 ;Multi color 1 lda #COLR_CHAR_MC2 sta $d023 ;Multi color 2 lda #$01 ;this is how to tell the VICII to generate a raster interrupt sta $d01a lda #$33 sta _topline lda #$fa ;this is how to tell at which rasterline we want the irq to be triggered sta _bottomline sta $d012 lda $D020 sta _charcount ;temp lda #$1b ;as there are more than 256 rasterlines, the topmost bit of $d011 serves as sta $d011 ;the 8th bit for the rasterline we want our irq to be triggered. ;here we simply set up a character screen, leaving the topmost bit 0. lda #$35 ;we turn off the BASIC and KERNAL rom here sta $01 ;the cpu now sees RAM everywhere except at $d000-$e000, where still the registers of ;SID/VICII/etc are visible lda #<bottom_irq ;this is how we set up sta $fffe ;the address of our interrupt code lda #>bottom_irq sta $ffff ; Init VU Meters variables ldx #$0c lda #$00 .im1 sta _vucnt,x dex bpl .im1 sta _tsecs ; Init playtime sta _tmins lda #$fe sta _vugate sta _vugate+1 sta _vugate+2 lda #$03 sta _framecnt ; Init frame counter lda #$31 ; Init playtime frame counter (for 50Hz) sta _tframe lda #$63 ldx #$06 sta _tpos stx _tpos+1 lda #$00 jsr inisub ; Init music cli ;enable maskable interrupts again rts ; ******** IRQ Routine ******** ; *main irq, play music (1x speed) mainirq: pha txa pha tya pha lda #$3d ;set up next raster irq at line $40 (for raster bars) sta $d012 lda #<irq_rasterbars ;this is how we set up sta $fffe ;the address of our interrupt code lda #>irq_rasterbars sta $ffff lda #%10111101 ;chargen at $3000 - matrix at $2c00 sta $d018 lda $d016 ora #%00010000 ;set multicolor mode sta $d016 ;----- dec _framecnt bpl .ir01 ;---- ;**** Beat color cycle **** ldy _beat ;check if color cycle is in progress beq .ir03 dey sty _beat lda _colortable2,y sta $d022 sta rr1+1 ;modify code for raster bars .ir03 lda #$03 sta _framecnt .ir01 bit _flag ;Wait until main program flags us to start playing music. 
bpl .ir02 ;inc $d020 jsr playsub ;Play music ;dec $d020 jsr SUpdate ;Update variables jsr VUpdate ;Update VU Meters jsr PTUpdate ;Update Playtime .ir02 ;----- asl $d019 pla tay pla tax pla rti ; *raster bars IRQ - for logo bars irq_rasterbars: pha ; saves A, X, Y txa pha tya pha +STABILIZE_RASTER sei jsr tworeds ldy #$01 jsr blacks1 ;two blacks - bad line jsr tworeds !if NTSC = 1 { ldy #$34 nop } else { ldy #$33 } jsr blacks1 ;six blacks jsr tworeds ldy #$08 jsr blacks1 ;two blacks jsr tworeds !if NTSC = 1 { ldy #$35 } else { ldy #$33 } jsr blacks1 ;six blacks jsr tworeds ldy #$01 ;two blacks - bad line jsr blacks1 jsr tworeds !if NTSC = 1 { ldy #$33 } else { ldy #$32 } jsr blacks1 ;six blacks jsr tworeds ldy#$08 jsr blacks1 ;two blacks !if NTSC = 1 { nop } jsr tworeds asl $d019 cli lda #$82 ;set up next raster irq at line $82 (just before 10th text row) sta $d012 lda #<secirq ;this is how we set up sta $fffe ;the address of our interrupt code lda #>secirq sta $ffff pla ; restores A, X, Y tay pla tax pla rti ; restores previous PC, status tworeds: ldy #$09 ;+2 .rra dey ;+2 bne .rra ;+2 +1 bit $00 ;+3 rr1 lda#$09 ;+2 sta$d020 ;+4 ldy #$17 ;+2 .rrb dey ;+2 bne .rrb ;+2 +1 bit $00 ;+3 !if NTSC = 1 { nop ;+2 nop ;+2 } lda #$00 sta $d020 rts blacks1: ;ldy #$01 ;+2 .rrc dey ;+2 bne .rrc ;+2 +1 bit $00 ;+3 !if NTSC = 1 { nop ;+2 nop ;+2 } rts ; *secondary IRQ - switchs from logo charset to 'normal' one secirq: pha txa pha tya pha lda #$fb ;set up next raster irq at line $fb (just after start of bottom border) sta $d012 lda #<mainirq ;this is how we set up sta $fffe ;the address of our interrupt code lda #>mainirq sta $ffff lda #%00011111 ;chargen at $3800 - matrix at $0400 sta $d018 lda $d016 and #%11101111 ;disable multicolor mode sta $d016 asl $d019 pla tay pla tax pla rti ; ******* subs IRQ ******* bottom_irq: pha txa pha tya pha ;----- lda _bottomline and #$07 cmp #$03 bne .bi01 nop nop nop nop nop nop nop nop nop nop nop nop .bi01 nop nop nop nop nop nop nop ; nop ; 
nop lda #$00 sta $d020 lda #$7b ;0b sta $d011 lda _topline sta $d012 lda #<top_irq ;this is how we set up sta $fffe ;the address of our interrupt code lda #>top_irq sta $ffff ;----- asl $d019 pla tay pla tax pla rti top_irq: pha txa pha tya pha ;----- ldx _topline inx cpx _bottomline bne .tpi01 ;center reached lda #$04 sta _framecnt lda #$0f ;this is how to tell at which rasterline we want the irq to be triggered sta $d012 dec _flag ;flag main routine, we're ready for next part lda #<idle_irq sta $fffe lda #>idle_irq bne .tpi02 ;sta $ffff .tpi01 lda _topline and #$07 cmp #$03 bne .ti01 ;bad line ; nop nop nop nop nop nop nop nop nop nop nop nop ; nop ; nop ; nop ; nop ; nop .ti01 nop nop nop nop lda #$1b sta $d011 lda _charcount sta $d020 stx _topline dec _bottomline lda _bottomline sta $d012 lda #<bottom_irq ;this is how we set up sta $fffe ;the address of our interrupt code lda #>bottom_irq .tpi02 sta $ffff ;----- asl $d019 pla tay pla tax pla rti idle_irq: pha txa pha tya pha ;----- dec _framecnt ;----- asl $d019 pla tay pla tax pla rti ;**** ;***** VUMeters code VUpdate: ;first check if gate status changed ldx #$03 .vu1 lda gate,x cmp _vugate,x beq .vu2 ;no change sta _vugate,x bcs .vu3 ;change to gate set -> attack ;change to gate clear -> release lda shad,x ;get Attack value lsr lsr lsr lsr tay lda _attackframes,y ;load frameskip counter sta _vucnt,x lda #$01 bne .vu4 .vu3 lda shsr ;ger Release value and #$15 tay lda _drframes,y ;load frameskip counter sta _vucnt,x lda #$04 .vu4 sta _vustat,x .vu2 dex bpl .vu1 ;state machine ldx #$02 .vu5 lda _vustat,x ;sta $720,x ;<-debug bne .vu0 jmp .vu6 ;idle .vu0 cmp #$04 bcc .vu7 ;-----attack ;check if frameskip reached 0 lda _vucnt,x beq .vu8b jmp .vu8 ;no .vu8b lda shnote,x ;get note ;sta $6f8,x ;Debug sta _vunote,x ;save for later lda shad,x ;get Attack value lsr lsr lsr lsr tay ;Yreg = Attack ;sta $748,x ;<-debug lda _attackframes,y ;reload frame skip counter sta _vucnt,x lda _attackinc,y ;get by how much we got to 
change sprite animation clc adc _vufrm,x ;and add to animation frame cmp #$0a bcc .vu9 lda #$0a ;reached full volume dec _vustat,x ;go to decay state pha lda _drframes,y ;set new frameskip sta _vucnt,x pla .vu9 sta _vufrm,x jmp .vu6 .vu7 cmp #$03 bcc .vu10 ;-----decay ;check if frameskip reached 0 lda _vucnt,x beq .vu8a jmp .vu8 ;no .vu8a lda shad,x ;get Delay value and #$0f ;sta $770,x ;<-debug tay ;Yreg = Decay lda _drframes,y ;reload frame skip counter sta _vucnt,x lda _vufrm,x ;get animation frame sec sbc _drinc,y ;and subtract appropriate value pha ;sta _vufrm,x lda shsr,x ;get curret sustain level lsr lsr lsr lsr tay pla clc cmp _sustainmap,y ;map sustain level to animation frame ;cmp _vufrm,x beq .vu11 ;reached sustain level bcs .vu14 ;not yet lda _sustainmap,y ;sta _vufrm,x ;we were below sustain level .vu11 dec _vustat,x ;go to sustain state .vu14 sta _vufrm,x bne .vu6 .vu10 cmp #$02 bcc .vu12 ;-----sustain lda _vucnt,x bne .vu8 ;no lda #$02 sta _vucnt,x lda shsr,x ;get Sustain value lsr lsr lsr lsr ;sta $798,x ;<-debug tay ;Yreg = Sustain lda _sustainmap,y ;map sustain level to animation frame cmp _vufrm,x bcs .vu15 ;sustain level is greater or equal than current animation frame, nothing to do sta _vufrm,x ;update frame only if sustain level decreases .vu15 cmp #$0a ;animate it a little if note changes while in sustain beq .vu6 ;if full volume, continue lda shnote,x ;otherwise cmp _vunote,x ;check if note changed beq .vu6 inc _vufrm,x ;increase frame if so sta _vunote,x bpl .vu6 .vu12 ;-----release ;check if frameskip reached 0 lda _vucnt,x bne .vu8 ;no lda shsr,x ;get Release value and #$0f ;sta $7c0,x ;<-debug tay ;Yreg = Release lda _drframes,y ;reload frame skip counter sta _vucnt,x lda _vufrm,x ;get animation frame sec sbc _drinc,y ;and subtract appropriate value sta _vufrm,x beq .vu13 ;reached 0 bpl .vu6 ;not yet lda #$00 sta _vufrm,x ;we were below 0 .vu13 dec _vustat,x ;go to idle state beq .vu6 .vu8 dec _vucnt,x .vu6 lda _vufrm,x clc adc #$d3 sta 
VUMETERS,x ;update sprite pointers dex bmi .vue jmp .vu5 .vue rts ;**** Update Playtime PTUpdate: dec _tframe bpl .pte lda #$31 ;Reset _tframe sta _tframe sed clc lda #$01 adc _tsecs cmp #$60 bne .ptu0 clc lda #$01 adc _tmins sta _tmins lda #$00 .ptu0 sta _tsecs cld .pte rts ;**** ;**** Update variables SUpdate: ldy #$02 .su1 ldx voffset,y lda CURNOT1,x sta shnote,y lda CURINS1,x sta shinst,y lda AD1,x sta shad,y lda SR1,x sta shsr,y lda PTNGATE1,x sta gate,y eor #$ff ;sta $7a8,y ;<-debug dey bpl .su1 ; lda CURNOT1 ; sta shnote ; lda CURNOT2 ; sta shnote+1 ; lda CURNOT3 ; sta shnote+2 ; lda CURINS1 ; sta shinst ; lda CURINS2 ; sta shinst+1 ; lda CURINS3 ; sta shinst+2 ; lda AD1 ; sta shad ; lda AD2 ; sta shad+1 ; lda AD3 ; sta shad+2 ; lda SR1 ; sta shsr ; lda SR2 ; sta shsr+1 ; lda SR3 ; sta shsr+2 ; lda PTNGATE1 ; sta gate ; lda PTNGATE1 ; sta gate ; lda PTNGATE1 ; sta gate codeend: rts ;**** SID Variables (use only with SID-Wizard) shinst: !8 0,0,0 shad: !8 0,0,0 shsr: !8 0,0,0 shnote: !8 0,0,0 gate: !8 0,0,0 voffset: !8 0,7,14 ;index offset varsend: ;***** MUSIC ***** *=$0fb8 !bin "mamakilla27.dat",,2 ;*=$1000 ;!bin "test.bin.seq" musicend: ;***** Logo charset ***** !source "logo2_rosa.asm" ;***** Sprites ***** *=$34C0 !bin "sprites2.bin" ;***** Main charset ***** *=$3800 !bin "Arlek-05b_7bit_fixed_woz.bin" ;"charset2.bin" ;***** Text ***** _SNameLabel: !scrxor $80,"4516 " !scrxor $80,"mama killa" !scrxor $80," 0123" _SAuthorLabel: !scr "author: " !scr "comu" _SDateLabel: !scr "released: " !scr "24/10/15" _SPlayLabel: !scr "playtime: 00:00" _SGFXLabel: !scr "gfx: alakran" _SCharLabel: !scr "charset: arlequin" _SCodeLabel: !scr "code: the" !8 95 !scr "woz" ;_SVULabel: ; !scrxor $80,"1 2 3"
apache-2.0
gbehrmann/JGlobus
ssl-proxies/src/main/java/org/globus/gsi/stores/ResourceSigningPolicyStore.java
8094
/*
 * Copyright 1999-2010 University of Chicago
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in
 * compliance with the License. You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software distributed under the License is
 * distributed on an "AS IS" BASIS,WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
 * express or implied.
 *
 * See the License for the specific language governing permissions and limitations under the License.
 */

package org.globus.gsi.stores;

import org.globus.gsi.provider.SigningPolicyStore;
import org.globus.gsi.provider.SigningPolicyStoreException;
import org.globus.gsi.provider.SigningPolicyStoreParameters;

import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.Log;

import java.io.IOException;
import java.net.URI;
import java.security.InvalidAlgorithmParameterException;
import java.util.Collection;
import java.util.HashMap;
import java.util.Map;

import javax.security.auth.x500.X500Principal;

import org.globus.gsi.SigningPolicy;
import org.globus.gsi.util.CertificateIOUtil;
import org.globus.util.GlobusResource;
import org.globus.util.GlobusPathMatchingResourcePatternResolver;

/**
 * A {@link SigningPolicyStore} that reads CA signing-policy files from the
 * configured trust-root locations and caches the parsed results.
 *
 * <p>Caching scheme (all keyed by the CA subject-DN hash, and in one case by
 * file name as well):
 * <ul>
 *   <li>{@code validPoliciesCache} — hashes for which a policy was loaded;
 *       refreshed after {@code CACHE_TIME_MILLIS}.</li>
 *   <li>{@code invalidPoliciesCache} — hashes and file names that failed to
 *       load or yielded nothing; negative-cached for ten times as long.</li>
 * </ul>
 *
 * <p>Public lookups are {@code synchronized}, so the unsynchronized maps are
 * only touched under the instance lock.
 *
 * @author [email protected]
 */
public class ResourceSigningPolicyStore implements SigningPolicyStore {

    // Resolves the trust-root location pattern(s) into concrete resources.
    private GlobusPathMatchingResourcePatternResolver globusResolver = new GlobusPathMatchingResourcePatternResolver();
    // Parsed policy files, keyed by the file URI.
    private Map<URI, ResourceSigningPolicy> signingPolicyFileMap = new HashMap<URI, ResourceSigningPolicy>();
    // Loaded policies, keyed by CA subject DN (X500Principal.getName()).
    private Map<String, SigningPolicy> policyMap = new HashMap<String, SigningPolicy>();
    private ResourceSigningPolicyStoreParameters parameters;
    private Log logger = LogFactory.getLog(ResourceSigningPolicyStore.class.getCanonicalName());
    // Negative cache; NOTE: holds both file names and CA-hash keys (see loadPolicy).
    private final Map<String, Long> invalidPoliciesCache = new HashMap<String, Long>();
    // Positive cache of CA hashes, value = time the policy was (re)loaded.
    private final Map<String, Long> validPoliciesCache = new HashMap<String, Long>();
    // Positive-cache lifetime: one hour. Negative entries live 10x this long.
    private final static long CACHE_TIME_MILLIS = 3600*1000;
    // NOTE(review): never read or written in this file — appears unused; confirm before removing.
    private long lastUpdate = 0;

    /**
     * Please use the {@link Stores} class to generate Key/Cert stores
     *
     * @param param must be a {@link ResourceSigningPolicyStoreParameters}
     * @throws InvalidAlgorithmParameterException if param is of any other type
     */
    public ResourceSigningPolicyStore(SigningPolicyStoreParameters param) throws InvalidAlgorithmParameterException {
        if (param == null) {
            throw new IllegalArgumentException();
        }

        if (!(param instanceof ResourceSigningPolicyStoreParameters)) {
            throw new InvalidAlgorithmParameterException();
        }

        this.parameters = (ResourceSigningPolicyStoreParameters) param;
    }

    /**
     * Returns the signing policy for the given CA, loading and caching it on
     * demand.
     *
     * @param caPrincipal CA subject DN; may be null
     * @return the policy, or null if caPrincipal is null, the policy is
     *         negative-cached, or no policy could be found
     * @throws SigningPolicyStoreException on resource-access or parse failure
     */
    public synchronized SigningPolicy getSigningPolicy(X500Principal caPrincipal) throws SigningPolicyStoreException {

        if (caPrincipal == null) {
            return null;
        }
        String name = caPrincipal.getName();
        long now = System.currentTimeMillis();
        String hash = CertificateIOUtil.nameHash(caPrincipal);
        Long validCacheTime = validPoliciesCache.get(hash);
        Long invalidCacheTime = invalidPoliciesCache.get(hash);
        // Known-bad hash within the (10x longer) negative-cache window: fail fast.
        if ((invalidCacheTime != null) && (now - invalidCacheTime < 10*CACHE_TIME_MILLIS)) {
            return null;
        }
        // (Re)load when never loaded, stale, or evicted from the policy map.
        if ((validCacheTime == null) || (now - validCacheTime >= CACHE_TIME_MILLIS) || !this.policyMap.containsKey(name)) {
            loadPolicy(hash);
        }

        return this.policyMap.get(name);
    }

    /**
     * Scans the trust-root locations for signing-policy files matching the
     * given CA hash, falling back to loading every policy file when no
     * hash-named file is found. Updates the positive/negative caches.
     *
     * @param hash CA subject-DN hash used as the expected file-name prefix
     * @throws SigningPolicyStoreException on unreadable resources
     */
    private synchronized void loadPolicy(String hash) throws SigningPolicyStoreException {

        String locations = this.parameters.getTrustRootLocations();
        GlobusResource[] resources;
        resources = globusResolver.getResources(locations);

        long now = System.currentTimeMillis();

        boolean found_policy = false;

        // Optimization: If we find a hash for this CA, only process that.
        // Otherwise, we will process all policies.
        for (GlobusResource resource : resources) {

            String filename = resource.getFilename();

            // Note invalidPoliciesCache contains both filenames and hashes!
            Long invalidCacheTime = invalidPoliciesCache.get(filename);
            if ((invalidCacheTime != null) && (now - invalidCacheTime < 10*CACHE_TIME_MILLIS)) {
                continue;
            }
            if (!filename.startsWith(hash)) {
                continue;
            }
            if (!resource.isReadable()) {
                logger.debug("Cannot read: " + resource.getFilename());
                continue;
            }

            try {
                loadSigningPolicy(resource, policyMap, signingPolicyFileMap);
            } catch (Exception e) {
                // Warn only once per negative-cache window to avoid log spam.
                invalidCacheTime = invalidPoliciesCache.get(filename);
                if ((invalidCacheTime == null) || (now - invalidCacheTime >= 10*CACHE_TIME_MILLIS)) {
                    logger.warn("Failed to load signing policy: " + filename);
                    logger.debug("Failed to load signing policy: " + filename, e);
                    invalidPoliciesCache.put(filename, now);
                    invalidPoliciesCache.put(hash, now);
                }
                continue;
            }

            found_policy = true;
        }

        if (found_policy) {
            // A hash-named file was processed but yielded no policy for this
            // CA (loadSigningPolicy never marked the hash valid): negative-cache it.
            if (!validPoliciesCache.containsKey(hash)) {
                invalidPoliciesCache.put(hash, now);
            }
            return;
        }

        // Poor-man's implementation. Note it is much more expensive than a hashed directory
        for (GlobusResource resource : resources) {

            String filename = resource.getFilename();
            Long invalidCacheTime = invalidPoliciesCache.get(filename);
            if ((invalidCacheTime != null) && (now - invalidCacheTime < 10*CACHE_TIME_MILLIS)) {
                continue;
            }

            try {
                loadSigningPolicy(resource, policyMap, signingPolicyFileMap);
            } catch (Exception e) {
                invalidCacheTime = invalidPoliciesCache.get(filename);
                if ((invalidCacheTime == null) || (now - invalidCacheTime >= 10*CACHE_TIME_MILLIS)) {
                    logger.warn("Failed to load signing policy: " + filename);
                    logger.debug("Failed to load signing policy: " + filename, e);
                    invalidPoliciesCache.put(filename, now);
                    invalidPoliciesCache.put(hash, now);
                }
                continue;
            }
        }

        // Nothing anywhere provided a policy for this hash: negative-cache it.
        if (!validPoliciesCache.containsKey(hash)) {
            invalidPoliciesCache.put(hash, now);
        }
    }

    /**
     * Parses one signing-policy file and merges every policy it contains into
     * the supplied maps, stamping each CA hash into the positive cache.
     *
     * @param policyResource       readable signing-policy file
     * @param policyMapToLoad      destination map, keyed by CA subject DN
     * @param currentPolicyFileMap destination map, keyed by file URI
     * @throws SigningPolicyStoreException if the file is unreadable or unparsable
     */
    private void loadSigningPolicy(
            GlobusResource policyResource, Map<String, SigningPolicy> policyMapToLoad,
            Map<URI, ResourceSigningPolicy> currentPolicyFileMap) throws SigningPolicyStoreException {

        URI uri;
        if (!policyResource.isReadable()) {
            throw new SigningPolicyStoreException("Cannot read file");
        }

        try {
            uri = policyResource.getURI();
        } catch (IOException e) {
            throw new SigningPolicyStoreException(e);
        }

        // Reuse an already-parsed wrapper for this URI when available.
        ResourceSigningPolicy filePolicy = this.signingPolicyFileMap.get(uri);
        if (filePolicy == null) {
            try {
                filePolicy = new ResourceSigningPolicy(policyResource);
            } catch (ResourceStoreException e) {
                throw new SigningPolicyStoreException(e);
            }
        }
        Collection<SigningPolicy> policies = filePolicy.getSigningPolicies();

        currentPolicyFileMap.put(uri, filePolicy);
        if (policies != null) {
            long now = System.currentTimeMillis();
            for (SigningPolicy policy : policies) {
                X500Principal caPrincipal = policy.getCASubjectDN();
                policyMapToLoad.put(caPrincipal.getName(), policy);
                String hash = CertificateIOUtil.nameHash(caPrincipal);
                validPoliciesCache.put(hash, now);
            }
        }
    }
}
apache-2.0
julnegre/diapoo
convert.sh
1131
#/bin/bash rep="photos/" cd $rep JHEAD=jhead SED=sed CONVERT=convert # rotation jhead -autorot *jpg jhead -autorot *JPG #rm small/*JPG for fichier in `ls *JPG` do if [ ! -f ../small/$fichier ] then echo "small - "$fichier convert $fichier -resize 25% ../small/$fichier fi if [ ! -f ../too_small/$fichier ] then echo "too_small - "$fichier convert $fichier -resize 5% ../too_small/$fichier fi done # rotation jhead -autorot ../small/*JPG jhead -autorot ../too_small/*JPG #rm small/*jpg for fichier in `ls *jpg` do if [ ! -f ../small/$fichier ] then echo $fichier convert $fichier -resize 60% ../small/$fichier fi if [ ! -f ../too_small/$fichier ] then convert $fichier -resize 15% ../too_small/$fichier fi done # rotation jhead -autorot ../small/*jpg jhead -autorot ../too_small/*jpg cd .. chmod -Rf 777 photos #lftp ftp://login:pwd@host -e "mirror -e -R /var/www/diapo/small /www/diapo/small ; quit" #lftp ftp://login:pwd@host -e "mirror -e -R /var/www/diapo/photos /www/diapo/photos ; quit" #lftp ftp://login:pwd@host -e "mirror -e -R /var/www/diapo/too_small /www/diapo/too_small ; quit"
apache-2.0
erichwang/presto
presto-iceberg/src/main/java/io/prestosql/plugin/iceberg/IcebergOrcFileWriter.java
11546
/* * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.prestosql.plugin.iceberg; import com.google.common.collect.ImmutableMap; import com.google.common.collect.ImmutableSet; import io.airlift.slice.Slice; import io.prestosql.orc.OrcDataSink; import io.prestosql.orc.OrcDataSource; import io.prestosql.orc.OrcWriteValidation; import io.prestosql.orc.OrcWriterOptions; import io.prestosql.orc.OrcWriterStats; import io.prestosql.orc.metadata.ColumnMetadata; import io.prestosql.orc.metadata.CompressionKind; import io.prestosql.orc.metadata.OrcColumnId; import io.prestosql.orc.metadata.OrcType; import io.prestosql.orc.metadata.statistics.ColumnStatistics; import io.prestosql.orc.metadata.statistics.DateStatistics; import io.prestosql.orc.metadata.statistics.DecimalStatistics; import io.prestosql.orc.metadata.statistics.DoubleStatistics; import io.prestosql.orc.metadata.statistics.IntegerStatistics; import io.prestosql.orc.metadata.statistics.StringStatistics; import io.prestosql.plugin.hive.orc.OrcFileWriter; import io.prestosql.spi.type.Type; import org.apache.iceberg.Metrics; import org.apache.iceberg.Schema; import org.apache.iceberg.types.Conversions; import org.apache.iceberg.types.Types; import java.math.BigDecimal; import java.nio.ByteBuffer; import java.util.List; import java.util.Map; import java.util.Optional; import java.util.OptionalInt; import java.util.Set; import java.util.concurrent.Callable; import java.util.function.Supplier; import static 
com.google.common.base.Verify.verify;
import static io.prestosql.orc.metadata.OrcColumnId.ROOT_COLUMN;
import static io.prestosql.plugin.hive.acid.AcidTransaction.NO_ACID_TRANSACTION;
import static io.prestosql.plugin.iceberg.TypeConverter.ORC_ICEBERG_ID_KEY;
import static java.lang.Math.toIntExact;
import static java.util.Objects.requireNonNull;

/**
 * An ORC file writer for Iceberg tables.
 *
 * <p>Extends the Hive {@code OrcFileWriter} and additionally derives Iceberg {@link Metrics}
 * (value counts, null counts, and per-column lower/upper bounds) from the ORC file-level column
 * statistics once the file has been written. The mapping between ORC columns and Iceberg fields
 * is carried in the ORC type attribute {@code ORC_ICEBERG_ID_KEY}.
 */
public class IcebergOrcFileWriter
        extends OrcFileWriter
        implements IcebergFileWriter
{
    // Iceberg table schema used to resolve field ids found in the ORC column attributes.
    private final Schema icebergSchema;
    // ORC column metadata for the file being written; indexed by OrcColumnId (0 = root).
    private final ColumnMetadata<OrcType> orcColumns;

    /**
     * Creates the writer. All ORC-specific parameters are forwarded unchanged to the
     * {@code OrcFileWriter} super constructor; this class only keeps what it needs to
     * compute Iceberg metrics afterwards.
     *
     * @param icebergSchema Iceberg schema of the target table (must not be null)
     * @param fileColumnOrcTypes ORC types of the file columns; retained as {@link #orcColumns}
     *        so that Iceberg field ids can be read back from the type attributes
     */
    public IcebergOrcFileWriter(
            Schema icebergSchema,
            OrcDataSink orcDataSink,
            Callable<Void> rollbackAction,
            List<String> columnNames,
            List<Type> fileColumnTypes,
            ColumnMetadata<OrcType> fileColumnOrcTypes,
            CompressionKind compression,
            OrcWriterOptions options,
            boolean writeLegacyVersion,
            int[] fileInputColumnIndexes,
            Map<String, String> metadata,
            Optional<Supplier<OrcDataSource>> validationInputFactory,
            OrcWriteValidation.OrcWriteValidationMode validationMode,
            OrcWriterStats stats)
    {
        super(orcDataSink, NO_ACID_TRANSACTION, false, OptionalInt.empty(), rollbackAction, columnNames, fileColumnTypes, fileColumnOrcTypes, compression, options, writeLegacyVersion, fileInputColumnIndexes, metadata, validationInputFactory, validationMode, stats);
        this.icebergSchema = requireNonNull(icebergSchema, "icebergSchema is null");
        orcColumns = fileColumnOrcTypes;
    }

    /**
     * Returns Iceberg metrics computed from the finished file's row count and the ORC
     * writer's accumulated column statistics.
     */
    @Override
    public Metrics getMetrics()
    {
        return computeMetrics(icebergSchema, orcColumns, orcWriter.getFileRowCount(), orcWriter.getFileStats());
    }

    /**
     * Translates ORC file statistics into an Iceberg {@link Metrics} object.
     *
     * <p>When no column statistics are available, only the row count is reported. Otherwise,
     * for every non-excluded ORC column the corresponding Iceberg field id is resolved and
     * value count, null count (when the stats carry a value count), and min/max bounds are
     * recorded. Empty maps are reported as {@code null}, matching the Metrics contract used
     * here.
     */
    private static Metrics computeMetrics(Schema icebergSchema, ColumnMetadata<OrcType> orcColumns, long fileRowCount, Optional<ColumnMetadata<ColumnStatistics>> columnStatistics)
    {
        if (columnStatistics.isEmpty()) {
            // No per-column stats collected; report row count only.
            return new Metrics(fileRowCount, null, null, null, null, null);
        }
        // Columns that are descendants of LIST or MAP types are excluded because:
        // 1. Their stats are not used by Apache Iceberg to filter out data files
        // 2. Their record count can be larger than table-level row count. There's no good way to calculate nullCounts for them.
        // See https://github.com/apache/iceberg/pull/199#discussion_r429443627
        Set<OrcColumnId> excludedColumns = getExcludedColumns(orcColumns);

        ImmutableMap.Builder<Integer, Long> valueCountsBuilder = ImmutableMap.builder();
        ImmutableMap.Builder<Integer, Long> nullCountsBuilder = ImmutableMap.builder();
        ImmutableMap.Builder<Integer, ByteBuffer> lowerBoundsBuilder = ImmutableMap.builder();
        ImmutableMap.Builder<Integer, ByteBuffer> upperBoundsBuilder = ImmutableMap.builder();
        // OrcColumnId(0) is the root column that represents file-level schema
        for (int i = 1; i < orcColumns.size(); i++) {
            OrcColumnId orcColumnId = new OrcColumnId(i);
            if (excludedColumns.contains(orcColumnId)) {
                continue;
            }
            OrcType orcColumn = orcColumns.get(orcColumnId);
            ColumnStatistics orcColumnStats = columnStatistics.get().get(orcColumnId);
            // Each retained ORC column must carry its Iceberg field id as a type attribute.
            int icebergId = getIcebergId(orcColumn);
            Types.NestedField icebergField = icebergSchema.findField(icebergId);
            verify(icebergField != null, "Cannot find Iceberg column with ID %s in schema %s", icebergId, icebergSchema);
            // NOTE(review): value count is set to the file row count for every column,
            // presumably valid because LIST/MAP descendants (which could repeat) are excluded
            // above — confirm for STRUCT members.
            valueCountsBuilder.put(icebergId, fileRowCount);
            if (orcColumnStats.hasNumberOfValues()) {
                // ORC's "number of values" excludes nulls, so nulls = rows - values.
                nullCountsBuilder.put(icebergId, fileRowCount - orcColumnStats.getNumberOfValues());
            }
            // Only record bounds when both min and max are available for the column's type.
            toIcebergMinMax(orcColumnStats, icebergField.type()).ifPresent(minMax -> {
                lowerBoundsBuilder.put(icebergId, minMax.getMin());
                upperBoundsBuilder.put(icebergId, minMax.getMax());
            });
        }
        Map<Integer, Long> valueCounts = valueCountsBuilder.build();
        Map<Integer, Long> nullCounts = nullCountsBuilder.build();
        Map<Integer, ByteBuffer> lowerBounds = lowerBoundsBuilder.build();
        Map<Integer, ByteBuffer> upperBounds = upperBoundsBuilder.build();
        return new Metrics(
                fileRowCount,
                null, // TODO: Add column size accounting to ORC column writers
                valueCounts.isEmpty() ? null : valueCounts,
                nullCounts.isEmpty() ? null : nullCounts,
                lowerBounds.isEmpty() ? null : lowerBounds,
                upperBounds.isEmpty() ? null : upperBounds);
    }

    /**
     * Returns the set of ORC column ids that must not contribute metrics: every column that
     * is a descendant of a LIST or MAP column (see the rationale in
     * {@link #computeMetrics}).
     */
    private static Set<OrcColumnId> getExcludedColumns(ColumnMetadata<OrcType> orcColumns)
    {
        ImmutableSet.Builder<OrcColumnId> excludedColumns = ImmutableSet.builder();
        populateExcludedColumns(orcColumns, ROOT_COLUMN, false, excludedColumns);
        return excludedColumns.build();
    }

    /**
     * Depth-first walk over the ORC type tree. {@code exclude} becomes (and stays) true once a
     * LIST or MAP ancestor has been crossed; every node visited with {@code exclude == true}
     * is added to the builder. Primitive leaves fall through the switch unvisited-children.
     */
    private static void populateExcludedColumns(ColumnMetadata<OrcType> orcColumns, OrcColumnId orcColumnId, boolean exclude, ImmutableSet.Builder<OrcColumnId> excludedColumns)
    {
        if (exclude) {
            excludedColumns.add(orcColumnId);
        }
        OrcType orcColumn = orcColumns.get(orcColumnId);
        switch (orcColumn.getOrcTypeKind()) {
            case LIST:
            case MAP:
                // All children of a LIST/MAP are excluded from here down.
                for (OrcColumnId child : orcColumn.getFieldTypeIndexes()) {
                    populateExcludedColumns(orcColumns, child, true, excludedColumns);
                }
                return;
            case STRUCT:
                // STRUCT itself does not trigger exclusion; propagate the current flag.
                for (OrcColumnId child : orcColumn.getFieldTypeIndexes()) {
                    populateExcludedColumns(orcColumns, child, exclude, excludedColumns);
                }
                return;
        }
    }

    /**
     * Reads the Iceberg field id stored in the ORC column's {@code ORC_ICEBERG_ID_KEY}
     * attribute. Fails if the attribute is missing — every column written by this connector
     * is expected to carry it.
     */
    private static int getIcebergId(OrcType orcColumn)
    {
        String icebergId = orcColumn.getAttributes().get(ORC_ICEBERG_ID_KEY);
        verify(icebergId != null, "ORC column %s doesn't have an associated Iceberg ID", orcColumn);
        return Integer.parseInt(icebergId);
    }

    /**
     * Converts ORC column statistics to an Iceberg min/max bound pair, coercing ORC's wider
     * representations to the Iceberg field type where needed (long → int, double → float,
     * Slice → UTF-8 String, decimal rescaled to the declared scale). Returns empty when the
     * stats kind is unsupported or either bound is absent.
     */
    private static Optional<IcebergMinMax> toIcebergMinMax(ColumnStatistics orcColumnStats, org.apache.iceberg.types.Type icebergType)
    {
        IntegerStatistics integerStatistics = orcColumnStats.getIntegerStatistics();
        if (integerStatistics != null) {
            Object min = integerStatistics.getMin();
            Object max = integerStatistics.getMax();
            if (min == null || max == null) {
                return Optional.empty();
            }
            if (icebergType.typeId() == org.apache.iceberg.types.Type.TypeID.INTEGER) {
                // ORC stores integer stats as long; narrow (checked) for Iceberg INTEGER fields.
                min = toIntExact((Long) min);
                max = toIntExact((Long) max);
            }
            return Optional.of(new IcebergMinMax(icebergType, min, max));
        }
        DoubleStatistics doubleStatistics = orcColumnStats.getDoubleStatistics();
        if (doubleStatistics != null) {
            Object min = doubleStatistics.getMin();
            Object max = doubleStatistics.getMax();
            if (min == null || max == null) {
                return Optional.empty();
            }
            if (icebergType.typeId() == org.apache.iceberg.types.Type.TypeID.FLOAT) {
                // ORC stores floating-point stats as double; narrow for Iceberg FLOAT fields.
                min = ((Double) min).floatValue();
                max = ((Double) max).floatValue();
            }
            return Optional.of(new IcebergMinMax(icebergType, min, max));
        }
        StringStatistics stringStatistics = orcColumnStats.getStringStatistics();
        if (stringStatistics != null) {
            Slice min = stringStatistics.getMin();
            Slice max = stringStatistics.getMax();
            if (min == null || max == null) {
                return Optional.empty();
            }
            return Optional.of(new IcebergMinMax(icebergType, min.toStringUtf8(), max.toStringUtf8()));
        }
        DateStatistics dateStatistics = orcColumnStats.getDateStatistics();
        if (dateStatistics != null) {
            Integer min = dateStatistics.getMin();
            Integer max = dateStatistics.getMax();
            if (min == null || max == null) {
                return Optional.empty();
            }
            return Optional.of(new IcebergMinMax(icebergType, min, max));
        }
        DecimalStatistics decimalStatistics = orcColumnStats.getDecimalStatistics();
        if (decimalStatistics != null) {
            BigDecimal min = decimalStatistics.getMin();
            BigDecimal max = decimalStatistics.getMax();
            if (min == null || max == null) {
                return Optional.empty();
            }
            // NOTE(review): setScale without a RoundingMode throws ArithmeticException if
            // rescaling would lose precision — presumably the ORC decimal stats already carry
            // the table's declared scale or a smaller one; confirm against the writer path.
            min = min.setScale(((Types.DecimalType) icebergType).scale());
            max = max.setScale(((Types.DecimalType) icebergType).scale());
            return Optional.of(new IcebergMinMax(icebergType, min, max));
        }
        // Unsupported statistics kind (e.g. boolean/binary/timestamp): no bounds recorded.
        return Optional.empty();
    }

    /**
     * Holder for a column's lower/upper bound, serialized to Iceberg's single-value binary
     * form via {@link Conversions#toByteBuffer}.
     */
    private static class IcebergMinMax
    {
        private ByteBuffer min;
        private ByteBuffer max;

        private IcebergMinMax(org.apache.iceberg.types.Type type, Object min, Object max)
        {
            this.min = Conversions.toByteBuffer(type, min);
            this.max = Conversions.toByteBuffer(type, max);
        }

        public ByteBuffer getMin()
        {
            return min;
        }

        public ByteBuffer getMax()
        {
            return max;
        }
    }
}
apache-2.0
EvilMcJerkface/alluxio
core/server/master/src/main/java/alluxio/master/file/DefaultFileSystemMaster.java
193983
/* * The Alluxio Open Foundation licenses this work under the Apache License, version 2.0 * (the "License"). You may not use this work except in compliance with the License, which is * available at www.apache.org/licenses/LICENSE-2.0 * * This software is distributed on an "AS IS" basis, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, * either express or implied, as more fully set forth in the License. * * See the NOTICE file distributed with this work for information regarding copyright ownership. */ package alluxio.master.file; import static alluxio.metrics.MetricInfo.UFS_OP_SAVED_PREFIX; import alluxio.AlluxioURI; import alluxio.ClientContext; import alluxio.Constants; import alluxio.Server; import alluxio.client.job.JobMasterClient; import alluxio.client.job.JobMasterClientPool; import alluxio.clock.SystemClock; import alluxio.collections.Pair; import alluxio.collections.PrefixList; import alluxio.conf.PropertyKey; import alluxio.conf.ServerConfiguration; import alluxio.exception.AccessControlException; import alluxio.exception.AlluxioException; import alluxio.exception.BlockInfoException; import alluxio.exception.ConnectionFailedException; import alluxio.exception.DirectoryNotEmptyException; import alluxio.exception.ExceptionMessage; import alluxio.exception.FileAlreadyCompletedException; import alluxio.exception.FileAlreadyExistsException; import alluxio.exception.FileDoesNotExistException; import alluxio.exception.InvalidFileSizeException; import alluxio.exception.InvalidPathException; import alluxio.exception.PreconditionMessage; import alluxio.exception.UnexpectedAlluxioException; import alluxio.exception.status.FailedPreconditionException; import alluxio.exception.status.InvalidArgumentException; import alluxio.exception.status.NotFoundException; import alluxio.exception.status.PermissionDeniedException; import alluxio.exception.status.ResourceExhaustedException; import alluxio.exception.status.UnavailableException; import 
alluxio.file.options.DescendantType; import alluxio.grpc.DeletePOptions; import alluxio.grpc.FileSystemMasterCommonPOptions; import alluxio.grpc.GrpcService; import alluxio.grpc.GrpcUtils; import alluxio.grpc.LoadDescendantPType; import alluxio.grpc.LoadMetadataPOptions; import alluxio.grpc.LoadMetadataPType; import alluxio.grpc.MountPOptions; import alluxio.grpc.ServiceType; import alluxio.grpc.SetAclAction; import alluxio.grpc.SetAttributePOptions; import alluxio.grpc.TtlAction; import alluxio.heartbeat.HeartbeatContext; import alluxio.heartbeat.HeartbeatThread; import alluxio.job.plan.persist.PersistConfig; import alluxio.job.wire.JobInfo; import alluxio.master.file.contexts.CallTracker; import alluxio.master.CoreMaster; import alluxio.master.CoreMasterContext; import alluxio.master.ProtobufUtils; import alluxio.master.audit.AsyncUserAccessAuditLogWriter; import alluxio.master.audit.AuditContext; import alluxio.master.block.BlockId; import alluxio.master.block.BlockMaster; import alluxio.master.file.activesync.ActiveSyncManager; import alluxio.master.file.contexts.CheckAccessContext; import alluxio.master.file.contexts.CheckConsistencyContext; import alluxio.master.file.contexts.CompleteFileContext; import alluxio.master.file.contexts.CreateDirectoryContext; import alluxio.master.file.contexts.CreateFileContext; import alluxio.master.file.contexts.DeleteContext; import alluxio.master.file.contexts.FreeContext; import alluxio.master.file.contexts.GetStatusContext; import alluxio.master.file.contexts.InternalOperationContext; import alluxio.master.file.contexts.ListStatusContext; import alluxio.master.file.contexts.LoadMetadataContext; import alluxio.master.file.contexts.MountContext; import alluxio.master.file.contexts.OperationContext; import alluxio.master.file.contexts.RenameContext; import alluxio.master.file.contexts.ScheduleAsyncPersistenceContext; import alluxio.master.file.contexts.SetAclContext; import alluxio.master.file.contexts.SetAttributeContext; 
import alluxio.master.file.contexts.WorkerHeartbeatContext; import alluxio.master.file.meta.FileSystemMasterView; import alluxio.master.file.meta.Inode; import alluxio.master.file.meta.InodeDirectory; import alluxio.master.file.meta.InodeDirectoryIdGenerator; import alluxio.master.file.meta.InodeDirectoryView; import alluxio.master.file.meta.InodeFile; import alluxio.master.file.meta.InodeLockManager; import alluxio.master.file.meta.InodePathPair; import alluxio.master.file.meta.InodeTree; import alluxio.master.file.meta.InodeTree.LockPattern; import alluxio.master.file.meta.LockedInodePath; import alluxio.master.file.meta.LockedInodePathList; import alluxio.master.file.meta.LockingScheme; import alluxio.master.file.meta.MountTable; import alluxio.master.file.meta.PersistenceState; import alluxio.master.file.meta.UfsAbsentPathCache; import alluxio.master.file.meta.UfsBlockLocationCache; import alluxio.master.file.meta.UfsSyncPathCache; import alluxio.master.file.meta.options.MountInfo; import alluxio.master.journal.DelegatingJournaled; import alluxio.master.journal.JournalContext; import alluxio.master.journal.Journaled; import alluxio.master.journal.JournaledGroup; import alluxio.master.journal.checkpoint.CheckpointName; import alluxio.master.metastore.DelegatingReadOnlyInodeStore; import alluxio.master.metastore.InodeStore; import alluxio.master.metastore.ReadOnlyInodeStore; import alluxio.master.metrics.TimeSeriesStore; import alluxio.metrics.Metric; import alluxio.metrics.MetricInfo; import alluxio.metrics.MetricKey; import alluxio.metrics.MetricsSystem; import alluxio.metrics.TimeSeries; import alluxio.proto.journal.File; import alluxio.proto.journal.File.NewBlockEntry; import alluxio.proto.journal.File.RenameEntry; import alluxio.proto.journal.File.SetAclEntry; import alluxio.proto.journal.File.UpdateInodeEntry; import alluxio.proto.journal.File.UpdateInodeFileEntry; import alluxio.proto.journal.File.UpdateInodeFileEntry.Builder; import 
alluxio.proto.journal.Journal.JournalEntry; import alluxio.resource.CloseableResource; import alluxio.resource.LockResource; import alluxio.retry.CountingRetry; import alluxio.retry.RetryPolicy; import alluxio.security.authentication.AuthType; import alluxio.security.authentication.AuthenticatedClientUser; import alluxio.security.authentication.ClientIpAddressInjector; import alluxio.security.authorization.AclEntry; import alluxio.security.authorization.AclEntryType; import alluxio.security.authorization.Mode; import alluxio.underfs.Fingerprint; import alluxio.underfs.MasterUfsManager; import alluxio.underfs.UfsManager; import alluxio.underfs.UfsMode; import alluxio.underfs.UfsStatus; import alluxio.underfs.UnderFileSystem; import alluxio.underfs.UnderFileSystemConfiguration; import alluxio.util.CommonUtils; import alluxio.util.IdUtils; import alluxio.util.LogUtils; import alluxio.util.ModeUtils; import alluxio.util.SecurityUtils; import alluxio.util.ThreadFactoryUtils; import alluxio.util.UnderFileSystemUtils; import alluxio.util.executor.ExecutorServiceFactories; import alluxio.util.executor.ExecutorServiceFactory; import alluxio.util.io.PathUtils; import alluxio.util.proto.ProtoUtils; import alluxio.wire.BlockInfo; import alluxio.wire.BlockLocation; import alluxio.wire.CommandType; import alluxio.wire.FileBlockInfo; import alluxio.wire.FileInfo; import alluxio.wire.FileSystemCommand; import alluxio.wire.FileSystemCommandOptions; import alluxio.wire.MountPointInfo; import alluxio.wire.PersistCommandOptions; import alluxio.wire.PersistFile; import alluxio.wire.SyncPointInfo; import alluxio.wire.UfsInfo; import alluxio.wire.WorkerInfo; import alluxio.worker.job.JobMasterClientContext; import com.codahale.metrics.Counter; import com.codahale.metrics.Gauge; import com.codahale.metrics.MetricRegistry; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import com.google.common.collect.ImmutableSet; import 
com.google.common.collect.Iterables; import com.google.common.collect.Sets; import io.grpc.ServerInterceptors; import org.apache.commons.lang3.StringUtils; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import java.io.FileNotFoundException; import java.io.IOException; import java.util.ArrayList; import java.util.Collection; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; import java.util.SortedMap; import java.util.Stack; import java.util.TreeMap; import java.util.concurrent.Callable; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ExecutorService; import java.util.concurrent.LinkedBlockingQueue; import java.util.concurrent.ThreadPoolExecutor; import java.util.concurrent.TimeUnit; import java.util.function.Function; import java.util.function.Supplier; import java.util.stream.Collectors; import java.util.stream.Stream; import javax.annotation.Nullable; import javax.annotation.concurrent.NotThreadSafe; /** * The master that handles all file system metadata management. */ @NotThreadSafe // TODO(jiri): make thread-safe (c.f. ALLUXIO-1664) public final class DefaultFileSystemMaster extends CoreMaster implements FileSystemMaster, DelegatingJournaled { private static final Logger LOG = LoggerFactory.getLogger(DefaultFileSystemMaster.class); private static final Set<Class<? extends Server>> DEPS = ImmutableSet.of(BlockMaster.class); /** The number of threads to use in the {@link #mPersistCheckerPool}. */ private static final int PERSIST_CHECKER_POOL_THREADS = 128; /** * Locking in DefaultFileSystemMaster * * Individual paths are locked in the inode tree. In order to read or write any inode, the path * must be locked. A path is locked via one of the lock methods in {@link InodeTree}, such as * {@link InodeTree#lockInodePath(AlluxioURI, LockMode)} or * {@link InodeTree#lockFullInodePath(AlluxioURI, LockMode)}. 
These lock methods return * an {@link LockedInodePath}, which represents a locked path of inodes. These locked paths * ({@link LockedInodePath}) must be unlocked. In order to ensure a locked * {@link LockedInodePath} is always unlocked, the following paradigm is recommended: * * <p><blockquote><pre> * try (LockedInodePath inodePath = mInodeTree.lockInodePath(path, LockPattern.READ)) { * ... * } * </pre></blockquote> * * When locking a path in the inode tree, it is possible that other concurrent operations have * modified the inode tree while a thread is waiting to acquire a lock on the inode. Lock * acquisitions throw {@link InvalidPathException} to indicate that the inode structure is no * longer consistent with what the caller original expected, for example if the inode * previously obtained at /pathA has been renamed to /pathB during the wait for the inode lock. * Methods which specifically act on a path will propagate this exception to the caller, while * methods which iterate over child nodes can safely ignore the exception and treat the inode * as no longer a child. * * JournalContext, BlockDeletionContext, and RpcContext * * RpcContext is an aggregator for various contexts which get passed around through file system * master methods. * * Currently there are two types of contexts that get passed around: {@link JournalContext} and * {@link BlockDeletionContext}. These contexts are used to register work that should be done when * the context closes. The journal context tracks journal entries which need to be flushed, while * the block deletion context tracks which blocks need to be deleted in the {@link BlockMaster}. * * File system master journal entries should be written before blocks are deleted in the block * master, so journal context should always be closed before block deletion context. 
In order to * ensure that contexts are closed and closed in the right order, the following paradign is * recommended: * * <p><blockquote><pre> * try (RpcContext rpcContext = createRpcContext()) { * // access journal context with rpcContext.getJournalContext() * // access block deletion context with rpcContext.getBlockDeletionContext() * ... * } * </pre></blockquote> * * When used in conjunction with {@link LockedInodePath} and {@link AuditContext}, the usage * should look like * * <p><blockquote><pre> * try (RpcContext rpcContext = createRpcContext(); * LockedInodePath inodePath = mInodeTree.lockInodePath(...); * FileSystemMasterAuditContext auditContext = createAuditContext(...)) { * ... * } * </pre></blockquote> * * NOTE: Because resources are released in the opposite order they are acquired, the * {@link JournalContext}, {@link BlockDeletionContext}, or {@link RpcContext} resources should be * always created before any {@link LockedInodePath} resources to avoid holding an inode path lock * while waiting for journal IO. * * User access audit logging in the FileSystemMaster * * User accesses to file system metadata should be audited. The intent to write audit log and the * actual writing of the audit log is decoupled so that operations are not holding metadata locks * waiting on the audit log IO. In particular {@link AsyncUserAccessAuditLogWriter} uses a * separate thread to perform actual audit log IO. In order for audit log entries to preserve * the order of file system operations, the intention of auditing should be submitted to * {@link AsyncUserAccessAuditLogWriter} while holding locks on the inode path. That said, the * {@link AuditContext} resources should always live within the scope of {@link LockedInodePath}, * i.e. created after {@link LockedInodePath}. Otherwise, the order of audit log entries may not * reflect the actual order of the user accesses. 
* Resources are released in the opposite order they are acquired, the * {@link AuditContext#close()} method is called before {@link LockedInodePath#close()}, thus * guaranteeing the order. * * Method Conventions in the FileSystemMaster * * All of the flow of the FileSystemMaster follow a convention. There are essentially 4 main * types of methods: * (A) public api methods * (B) private (or package private) internal methods * * (A) public api methods: * These methods are public and are accessed by the RPC and REST APIs. These methods lock all * the required paths, and also perform all permission checking. * (A) cannot call (A) * (A) can call (B) * * (B) private (or package private) internal methods: * These methods perform the rest of the work. The names of these * methods are suffixed by "Internal". These are typically called by the (A) methods. * (B) cannot call (A) * (B) can call (B) */ /** Handle to the block master. */ private final BlockMaster mBlockMaster; /** This manages the file system inode structure. This must be journaled. */ private final InodeTree mInodeTree; /** Store for holding inodes. */ private final ReadOnlyInodeStore mInodeStore; /** This manages inode locking. */ private final InodeLockManager mInodeLockManager; /** This manages the file system mount points. */ private final MountTable mMountTable; /** This generates unique directory ids. This must be journaled. */ private final InodeDirectoryIdGenerator mDirectoryIdGenerator; /** This checks user permissions on different operations. */ private final PermissionChecker mPermissionChecker; /** List of paths to always keep in memory. */ private final PrefixList mWhitelist; /** A pool of job master clients. */ private final JobMasterClientPool mJobMasterClientPool; /** Set of file IDs to persist. */ private final Map<Long, alluxio.time.ExponentialTimer> mPersistRequests; /** Map from file IDs to persist jobs. */ private final Map<Long, PersistJob> mPersistJobs; /** The manager of all ufs. 
*/ private final MasterUfsManager mUfsManager; /** This caches absent paths in the UFS. */ private final UfsAbsentPathCache mUfsAbsentPathCache; /** This caches block locations in the UFS. */ private final UfsBlockLocationCache mUfsBlockLocationCache; /** This caches paths which have been synced with UFS. */ private final UfsSyncPathCache mUfsSyncPathCache; /** The {@link JournaledGroup} representing all the subcomponents which require journaling. */ private final JournaledGroup mJournaledGroup; /** List of strings which are blacklisted from async persist. */ private final List<String> mPersistBlacklist; /** Thread pool which asynchronously handles the completion of persist jobs. */ private java.util.concurrent.ThreadPoolExecutor mPersistCheckerPool; private ActiveSyncManager mSyncManager; /** Log writer for user access audit log. */ private AsyncUserAccessAuditLogWriter mAsyncAuditLogWriter; /** Stores the time series for various metrics which are exposed in the UI. */ private TimeSeriesStore mTimeSeriesStore; private AccessTimeUpdater mAccessTimeUpdater; /** Used to check pending/running backup from RPCs. 
*/ private CallTracker mStateLockCallTracker; final ThreadPoolExecutor mSyncPrefetchExecutor = new ThreadPoolExecutor( ServerConfiguration.getInt(PropertyKey.MASTER_METADATA_SYNC_UFS_PREFETCH_POOL_SIZE), ServerConfiguration.getInt(PropertyKey.MASTER_METADATA_SYNC_UFS_PREFETCH_POOL_SIZE), 1, TimeUnit.MINUTES, new LinkedBlockingQueue<>(), ThreadFactoryUtils.build("alluxio-ufs-sync-prefetch-%d", false)); final ThreadPoolExecutor mSyncMetadataExecutor = new ThreadPoolExecutor( ServerConfiguration.getInt(PropertyKey.MASTER_METADATA_SYNC_EXECUTOR_POOL_SIZE), ServerConfiguration.getInt(PropertyKey.MASTER_METADATA_SYNC_EXECUTOR_POOL_SIZE), 1, TimeUnit.MINUTES, new LinkedBlockingQueue<>(), ThreadFactoryUtils.build("alluxio-ufs-sync-%d", false)); final ThreadPoolExecutor mActiveSyncMetadataExecutor = new ThreadPoolExecutor( ServerConfiguration.getInt(PropertyKey.MASTER_METADATA_SYNC_EXECUTOR_POOL_SIZE), ServerConfiguration.getInt(PropertyKey.MASTER_METADATA_SYNC_EXECUTOR_POOL_SIZE), 1, TimeUnit.MINUTES, new LinkedBlockingQueue<>(), ThreadFactoryUtils.build("alluxio-ufs-active-sync-%d", false)); /** * Creates a new instance of {@link DefaultFileSystemMaster}. * * @param blockMaster a block master handle * @param masterContext the context for Alluxio master */ public DefaultFileSystemMaster(BlockMaster blockMaster, CoreMasterContext masterContext) { this(blockMaster, masterContext, ExecutorServiceFactories.cachedThreadPool(Constants.FILE_SYSTEM_MASTER_NAME)); } /** * Creates a new instance of {@link DefaultFileSystemMaster}. 
* * @param blockMaster a block master handle * @param masterContext the context for Alluxio master * @param executorServiceFactory a factory for creating the executor service to use for running * maintenance threads */ public DefaultFileSystemMaster(BlockMaster blockMaster, CoreMasterContext masterContext, ExecutorServiceFactory executorServiceFactory) { super(masterContext, new SystemClock(), executorServiceFactory); mBlockMaster = blockMaster; mDirectoryIdGenerator = new InodeDirectoryIdGenerator(mBlockMaster); mUfsManager = masterContext.getUfsManager(); mMountTable = new MountTable(mUfsManager, getRootMountInfo(mUfsManager)); mInodeLockManager = new InodeLockManager(); InodeStore inodeStore = masterContext.getInodeStoreFactory().apply(mInodeLockManager); mInodeStore = new DelegatingReadOnlyInodeStore(inodeStore); mInodeTree = new InodeTree(inodeStore, mBlockMaster, mDirectoryIdGenerator, mMountTable, mInodeLockManager); // TODO(gene): Handle default config value for whitelist. mWhitelist = new PrefixList(ServerConfiguration.getList(PropertyKey.MASTER_WHITELIST, ",")); mPersistBlacklist = ServerConfiguration.isSet(PropertyKey.MASTER_PERSISTENCE_BLACKLIST) ? 
ServerConfiguration.getList(PropertyKey.MASTER_PERSISTENCE_BLACKLIST, ",") : Collections.emptyList(); mStateLockCallTracker = new CallTracker() { @Override public boolean isCancelled() { return masterContext.getStateLockManager().interruptCycleTicking(); } @Override public Type getType() { return Type.STATE_LOCK_TRACKER; } }; mPermissionChecker = new DefaultPermissionChecker(mInodeTree); mJobMasterClientPool = new JobMasterClientPool(JobMasterClientContext .newBuilder(ClientContext.create(ServerConfiguration.global())).build()); mPersistRequests = new java.util.concurrent.ConcurrentHashMap<>(); mPersistJobs = new java.util.concurrent.ConcurrentHashMap<>(); mUfsAbsentPathCache = UfsAbsentPathCache.Factory.create(mMountTable); mUfsBlockLocationCache = UfsBlockLocationCache.Factory.create(mMountTable); mUfsSyncPathCache = new UfsSyncPathCache(); mSyncManager = new ActiveSyncManager(mMountTable, this); mTimeSeriesStore = new TimeSeriesStore(); mAccessTimeUpdater = new AccessTimeUpdater(this, mInodeTree, masterContext.getJournalSystem()); // Sync executors should allow core threads to time out mSyncPrefetchExecutor.allowCoreThreadTimeOut(true); mSyncMetadataExecutor.allowCoreThreadTimeOut(true); mActiveSyncMetadataExecutor.allowCoreThreadTimeOut(true); // The mount table should come after the inode tree because restoring the mount table requires // that the inode tree is already restored. 
ArrayList<Journaled> journaledComponents = new ArrayList<Journaled>() { { add(mInodeTree); add(mDirectoryIdGenerator); add(mMountTable); add(mUfsManager); add(mSyncManager); } }; mJournaledGroup = new JournaledGroup(journaledComponents, CheckpointName.FILE_SYSTEM_MASTER); resetState(); Metrics.registerGauges(this, mUfsManager); } private static MountInfo getRootMountInfo(MasterUfsManager ufsManager) { try (CloseableResource<UnderFileSystem> resource = ufsManager.getRoot().acquireUfsResource()) { boolean shared = resource.get().isObjectStorage() && ServerConfiguration.getBoolean(PropertyKey.UNDERFS_OBJECT_STORE_MOUNT_SHARED_PUBLICLY); boolean readonly = ServerConfiguration.getBoolean( PropertyKey.MASTER_MOUNT_TABLE_ROOT_READONLY); String rootUfsUri = PathUtils.normalizePath( ServerConfiguration.get(PropertyKey.MASTER_MOUNT_TABLE_ROOT_UFS), AlluxioURI.SEPARATOR); Map<String, String> rootUfsConf = ServerConfiguration.getNestedProperties(PropertyKey.MASTER_MOUNT_TABLE_ROOT_OPTION); MountPOptions mountOptions = MountContext .mergeFrom(MountPOptions.newBuilder().setShared(shared).setReadOnly(readonly) .putAllProperties(rootUfsConf)) .getOptions().build(); return new MountInfo(new AlluxioURI(MountTable.ROOT), new AlluxioURI(rootUfsUri), IdUtils.ROOT_MOUNT_ID, mountOptions); } } @Override public Map<ServiceType, GrpcService> getServices() { Map<ServiceType, GrpcService> services = new HashMap<>(); services.put(ServiceType.FILE_SYSTEM_MASTER_CLIENT_SERVICE, new GrpcService(ServerInterceptors .intercept(new FileSystemMasterClientServiceHandler(this), new ClientIpAddressInjector()))); services.put(ServiceType.FILE_SYSTEM_MASTER_JOB_SERVICE, new GrpcService(new FileSystemMasterJobServiceHandler(this))); services.put(ServiceType.FILE_SYSTEM_MASTER_WORKER_SERVICE, new GrpcService(new FileSystemMasterWorkerServiceHandler(this))); return services; } @Override public String getName() { return Constants.FILE_SYSTEM_MASTER_NAME; } @Override public Set<Class<? 
extends Server>> getDependencies() { return DEPS; } @Override public Journaled getDelegate() { return mJournaledGroup; } @Override public void start(Boolean isPrimary) throws IOException { super.start(isPrimary); if (isPrimary) { LOG.info("Starting fs master as primary"); InodeDirectory root = mInodeTree.getRoot(); if (root == null) { try (JournalContext context = createJournalContext()) { mInodeTree.initializeRoot( SecurityUtils.getOwner(mMasterContext.getUserState()), SecurityUtils.getGroup(mMasterContext.getUserState(), ServerConfiguration.global()), ModeUtils.applyDirectoryUMask(Mode.createFullAccess(), ServerConfiguration.get(PropertyKey.SECURITY_AUTHORIZATION_PERMISSION_UMASK)), context); } } else if (!ServerConfiguration.getBoolean(PropertyKey.MASTER_SKIP_ROOT_ACL_CHECK)) { // For backwards-compatibility: // Empty root owner indicates that previously the master had no security. In this case, the // master is allowed to be started with security turned on. String serverOwner = SecurityUtils.getOwner(mMasterContext.getUserState()); if (SecurityUtils.isSecurityEnabled(ServerConfiguration.global()) && !root.getOwner().isEmpty() && !root.getOwner().equals(serverOwner)) { // user is not the previous owner throw new PermissionDeniedException(ExceptionMessage.PERMISSION_DENIED.getMessage(String .format("Unauthorized user on root. inode owner: %s current user: %s", root.getOwner(), serverOwner))); } } // Initialize the ufs manager from the mount table. 
for (String key : mMountTable.getMountTable().keySet()) { if (key.equals(MountTable.ROOT)) { continue; } MountInfo mountInfo = mMountTable.getMountTable().get(key); UnderFileSystemConfiguration ufsConf = UnderFileSystemConfiguration.defaults(ServerConfiguration.global()) .createMountSpecificConf(mountInfo.getOptions().getPropertiesMap()) .setReadOnly(mountInfo.getOptions().getReadOnly()) .setShared(mountInfo.getOptions().getShared()); mUfsManager.addMount(mountInfo.getMountId(), mountInfo.getUfsUri(), ufsConf); } // Startup Checks and Periodic Threads. // Rebuild the list of persist jobs (mPersistJobs) and map of pending persist requests // (mPersistRequests) long persistInitialIntervalMs = ServerConfiguration.getMs(PropertyKey.MASTER_PERSISTENCE_INITIAL_INTERVAL_MS); long persistMaxIntervalMs = ServerConfiguration.getMs(PropertyKey.MASTER_PERSISTENCE_MAX_INTERVAL_MS); long persistMaxWaitMs = ServerConfiguration.getMs(PropertyKey.MASTER_PERSISTENCE_MAX_TOTAL_WAIT_TIME_MS); for (Long id : mInodeTree.getToBePersistedIds()) { Inode inode = mInodeStore.get(id).get(); if (inode.isDirectory() || !inode.asFile().isCompleted() // When file is completed it is added to persist reqs || inode.getPersistenceState() != PersistenceState.TO_BE_PERSISTED || inode.asFile().getShouldPersistTime() == Constants.NO_AUTO_PERSIST) { continue; } InodeFile inodeFile = inode.asFile(); if (inodeFile.getPersistJobId() == Constants.PERSISTENCE_INVALID_JOB_ID) { mPersistRequests.put(inodeFile.getId(), new alluxio.time.ExponentialTimer( persistInitialIntervalMs, persistMaxIntervalMs, getPersistenceWaitTime(inodeFile.getShouldPersistTime()), persistMaxWaitMs)); } else { AlluxioURI path; try { path = mInodeTree.getPath(inodeFile); } catch (FileDoesNotExistException e) { LOG.error("Failed to determine path for inode with id {}", id, e); continue; } addPersistJob(id, inodeFile.getPersistJobId(), getPersistenceWaitTime(inodeFile.getShouldPersistTime()), path, inodeFile.getTempUfsPath()); } } if 
(ServerConfiguration .getBoolean(PropertyKey.MASTER_STARTUP_BLOCK_INTEGRITY_CHECK_ENABLED)) { validateInodeBlocks(true); } int blockIntegrityCheckInterval = (int) ServerConfiguration .getMs(PropertyKey.MASTER_PERIODIC_BLOCK_INTEGRITY_CHECK_INTERVAL); if (blockIntegrityCheckInterval > 0) { // negative or zero interval implies disabled getExecutorService().submit( new HeartbeatThread(HeartbeatContext.MASTER_BLOCK_INTEGRITY_CHECK, new BlockIntegrityChecker(this), blockIntegrityCheckInterval, ServerConfiguration.global(), mMasterContext.getUserState())); } getExecutorService().submit( new HeartbeatThread(HeartbeatContext.MASTER_TTL_CHECK, new InodeTtlChecker(this, mInodeTree), (int) ServerConfiguration.getMs(PropertyKey.MASTER_TTL_CHECKER_INTERVAL_MS), ServerConfiguration.global(), mMasterContext.getUserState())); getExecutorService().submit( new HeartbeatThread(HeartbeatContext.MASTER_LOST_FILES_DETECTION, new LostFileDetector(this, mInodeTree), (int) ServerConfiguration.getMs(PropertyKey .MASTER_LOST_WORKER_FILE_DETECTION_INTERVAL), ServerConfiguration.global(), mMasterContext.getUserState())); getExecutorService().submit(new HeartbeatThread( HeartbeatContext.MASTER_REPLICATION_CHECK, new alluxio.master.file.replication.ReplicationChecker(mInodeTree, mBlockMaster, mSafeModeManager, mJobMasterClientPool), (int) ServerConfiguration.getMs(PropertyKey.MASTER_REPLICATION_CHECK_INTERVAL_MS), ServerConfiguration.global(), mMasterContext.getUserState())); getExecutorService().submit( new HeartbeatThread(HeartbeatContext.MASTER_PERSISTENCE_SCHEDULER, new PersistenceScheduler(), (int) ServerConfiguration.getMs(PropertyKey.MASTER_PERSISTENCE_SCHEDULER_INTERVAL_MS), ServerConfiguration.global(), mMasterContext.getUserState())); mPersistCheckerPool = new java.util.concurrent.ThreadPoolExecutor(PERSIST_CHECKER_POOL_THREADS, PERSIST_CHECKER_POOL_THREADS, 1, java.util.concurrent.TimeUnit.MINUTES, new LinkedBlockingQueue<Runnable>(), 
alluxio.util.ThreadFactoryUtils.build("Persist-Checker-%d", true)); mPersistCheckerPool.allowCoreThreadTimeOut(true); getExecutorService().submit( new HeartbeatThread(HeartbeatContext.MASTER_PERSISTENCE_CHECKER, new PersistenceChecker(), (int) ServerConfiguration.getMs(PropertyKey.MASTER_PERSISTENCE_CHECKER_INTERVAL_MS), ServerConfiguration.global(), mMasterContext.getUserState())); getExecutorService().submit( new HeartbeatThread(HeartbeatContext.MASTER_METRICS_TIME_SERIES, new TimeSeriesRecorder(), (int) ServerConfiguration.getMs(PropertyKey.MASTER_METRICS_TIME_SERIES_INTERVAL), ServerConfiguration.global(), mMasterContext.getUserState())); if (ServerConfiguration.getBoolean(PropertyKey.MASTER_AUDIT_LOGGING_ENABLED)) { mAsyncAuditLogWriter = new AsyncUserAccessAuditLogWriter(); mAsyncAuditLogWriter.start(); } if (ServerConfiguration.getBoolean(PropertyKey.UNDERFS_CLEANUP_ENABLED)) { getExecutorService().submit( new HeartbeatThread(HeartbeatContext.MASTER_UFS_CLEANUP, new UfsCleaner(this), (int) ServerConfiguration.getMs(PropertyKey.UNDERFS_CLEANUP_INTERVAL), ServerConfiguration.global(), mMasterContext.getUserState())); } mAccessTimeUpdater.start(); mSyncManager.start(); } } @Override public void stop() throws IOException { if (mAsyncAuditLogWriter != null) { mAsyncAuditLogWriter.stop(); mAsyncAuditLogWriter = null; } mSyncManager.stop(); mAccessTimeUpdater.stop(); super.stop(); } @Override public void close() throws IOException { super.close(); mInodeTree.close(); mInodeLockManager.close(); try { mSyncMetadataExecutor.shutdownNow(); mSyncMetadataExecutor.awaitTermination(5, TimeUnit.SECONDS); } catch (InterruptedException e) { Thread.currentThread().interrupt(); LOG.warn("Failed to wait for metadata sync executor to shut down."); } try { mSyncPrefetchExecutor.shutdownNow(); mSyncPrefetchExecutor.awaitTermination(5, TimeUnit.SECONDS); } catch (InterruptedException e) { Thread.currentThread().interrupt(); LOG.warn("Failed to wait for ufs prefetch executor to shut 
down."); } try { mActiveSyncMetadataExecutor.shutdownNow(); mActiveSyncMetadataExecutor.awaitTermination(5, TimeUnit.SECONDS); } catch (InterruptedException e) { Thread.currentThread().interrupt(); LOG.warn("Failed to wait for active sync executor to shut down."); } } @Override public void validateInodeBlocks(boolean repair) throws UnavailableException { mBlockMaster.validateBlocks((blockId) -> { long fileId = IdUtils.fileIdFromBlockId(blockId); return mInodeTree.inodeIdExists(fileId); }, repair); } @Override public void cleanupUfs() { for (Map.Entry<String, MountInfo> mountPoint : mMountTable.getMountTable().entrySet()) { MountInfo info = mountPoint.getValue(); if (info.getOptions().getReadOnly()) { continue; } try (CloseableResource<UnderFileSystem> ufsResource = mUfsManager.get(info.getMountId()).acquireUfsResource()) { ufsResource.get().cleanup(); } catch (UnavailableException | NotFoundException e) { LOG.error("No UFS cached for {}", info, e); } catch (IOException e) { LOG.error("Failed in cleanup UFS {}.", info, e); } } } @Override public long getFileId(AlluxioURI path) throws AccessControlException, UnavailableException { return getFileIdInternal(path, true); } private long getFileIdInternal(AlluxioURI path, boolean checkPermission) throws AccessControlException, UnavailableException { try (RpcContext rpcContext = createRpcContext()) { /* In order to prevent locking twice on RPCs where metadata does _not_ need to be loaded, we use a two-step scheme as an optimization to prevent the extra lock. loadMetadataIfNotExists requires a lock on the tree to determine if the path should be loaded before executing. To prevent the extra lock, we execute the RPC as normal and use a conditional check in the main body of the function to determine whether control flow should be shifted out of the RPC logic and back to the loadMetadataIfNotExists function. If loadMetadataIfNotExists runs, then the next pass into the main logic body should continue as normal. 
This may present a slight decrease in performance for newly-loaded metadata, but it is better than affecting the most common case where metadata is not being loaded. */ LoadMetadataContext lmCtx = LoadMetadataContext.mergeFrom( LoadMetadataPOptions.newBuilder().setCreateAncestors(true)); boolean run = true; boolean loadMetadata = false; while (run) { run = false; if (loadMetadata) { loadMetadataIfNotExist(rpcContext, path, lmCtx, false); } try (LockedInodePath inodePath = mInodeTree.lockInodePath(path, LockPattern.READ)) { if (checkPermission) { mPermissionChecker.checkPermission(Mode.Bits.READ, inodePath); } if (!loadMetadata && shouldLoadMetadataIfNotExists(inodePath, lmCtx)) { loadMetadata = true; run = true; continue; } mInodeTree.ensureFullInodePath(inodePath); return inodePath.getInode().getId(); } catch (InvalidPathException | FileDoesNotExistException e) { return IdUtils.INVALID_FILE_ID; } } } catch (InvalidPathException e) { return IdUtils.INVALID_FILE_ID; } return IdUtils.INVALID_FILE_ID; } @Override public FileInfo getFileInfo(long fileId) throws FileDoesNotExistException, AccessControlException, UnavailableException { Metrics.GET_FILE_INFO_OPS.inc(); try (LockedInodePath inodePath = mInodeTree.lockFullInodePath(fileId, LockPattern.READ)) { return getFileInfoInternal(inodePath); } } @Override public FileInfo getFileInfo(AlluxioURI path, GetStatusContext context) throws FileDoesNotExistException, InvalidPathException, AccessControlException, IOException { Metrics.GET_FILE_INFO_OPS.inc(); boolean ufsAccessed = false; long opTimeMs = System.currentTimeMillis(); try (RpcContext rpcContext = createRpcContext(context); FileSystemMasterAuditContext auditContext = createAuditContext("getFileInfo", path, null, null)) { if (syncMetadata(rpcContext, path, context.getOptions().getCommonOptions(), DescendantType.ONE, auditContext, LockedInodePath::getInodeOrNull, (inodePath, permChecker) -> permChecker.checkPermission(Mode.Bits.READ, inodePath), true)) { // If 
synced, do not load metadata. context.getOptions().setLoadMetadataType(LoadMetadataPType.NEVER); ufsAccessed = true; } LoadMetadataContext lmCtx = LoadMetadataContext.mergeFrom( LoadMetadataPOptions.newBuilder().setCreateAncestors(true).setCommonOptions( FileSystemMasterCommonPOptions.newBuilder() .setTtl(context.getOptions().getCommonOptions().getTtl()) .setTtlAction(context.getOptions().getCommonOptions().getTtlAction()))); /* See the comments in #getFileIdInternal for an explanation on why the loop here is required. */ boolean run = true; boolean loadMetadata = false; FileInfo ret = null; while (run) { run = false; if (loadMetadata) { checkLoadMetadataOptions(context.getOptions().getLoadMetadataType(), path); loadMetadataIfNotExist(rpcContext, path, lmCtx, true); ufsAccessed = true; } LockingScheme lockingScheme = new LockingScheme(path, LockPattern.READ, false); try (LockedInodePath inodePath = mInodeTree.lockInodePath(lockingScheme)) { auditContext.setSrcInode(inodePath.getInodeOrNull()); try { mPermissionChecker.checkPermission(Mode.Bits.READ, inodePath); } catch (AccessControlException e) { auditContext.setAllowed(false); throw e; } if (!loadMetadata && shouldLoadMetadataIfNotExists(inodePath, lmCtx)) { loadMetadata = true; run = true; continue; } ensureFullPathAndUpdateCache(inodePath); FileInfo fileInfo = getFileInfoInternal(inodePath); if (ufsAccessed) { MountTable.Resolution resolution = mMountTable.resolve(inodePath.getUri()); Metrics.getUfsCounter(mMountTable.getMountInfo( resolution.getMountId()).getUfsUri().toString(), Metrics.UFSOps.GET_FILE_INFO).dec(); } Mode.Bits accessMode = Mode.Bits.fromProto(context.getOptions().getAccessMode()); if (context.getOptions().getUpdateTimestamps() && context.getOptions().hasAccessMode() && (accessMode.imply(Mode.Bits.READ) || accessMode.imply(Mode.Bits.WRITE))) { mAccessTimeUpdater.updateAccessTime(rpcContext.getJournalContext(), inodePath.getInode(), opTimeMs); } 
auditContext.setSrcInode(inodePath.getInode()).setSucceeded(true); ret = fileInfo; } } return ret; } } /** * @param inodePath the {@link LockedInodePath} to get the {@link FileInfo} for * @return the {@link FileInfo} for the given inode */ private FileInfo getFileInfoInternal(LockedInodePath inodePath) throws FileDoesNotExistException, UnavailableException { Inode inode = inodePath.getInode(); AlluxioURI uri = inodePath.getUri(); FileInfo fileInfo = inode.generateClientFileInfo(uri.toString()); if (fileInfo.isFolder()) { fileInfo.setLength(inode.asDirectory().getChildCount()); } fileInfo.setInMemoryPercentage(getInMemoryPercentage(inode)); fileInfo.setInAlluxioPercentage(getInAlluxioPercentage(inode)); if (inode.isFile()) { try { fileInfo.setFileBlockInfos(getFileBlockInfoListInternal(inodePath)); } catch (InvalidPathException e) { throw new FileDoesNotExistException(e.getMessage(), e); } } // Rehydrate missing block-infos for persisted files. if (fileInfo.getBlockIds().size() > fileInfo.getFileBlockInfos().size() && inode.isPersisted()) { List<Long> missingBlockIds = fileInfo.getBlockIds().stream() .filter((bId) -> fileInfo.getFileBlockInfo(bId) != null).collect(Collectors.toList()); LOG.warn("BlockInfo missing for file: {}. BlockIdsWithMissingInfos: {}", inodePath.getUri(), missingBlockIds.stream().map(Object::toString).collect(Collectors.joining(","))); // Remove old block metadata from block-master before re-committing. mBlockMaster.removeBlocks(fileInfo.getBlockIds(), true); // Commit all the file blocks (without locations) so the metadata for the block exists. commitBlockInfosForFile( fileInfo.getBlockIds(), fileInfo.getLength(), fileInfo.getBlockSizeBytes()); // Reset file-block-info list with the new list. 
try { fileInfo.setFileBlockInfos(getFileBlockInfoListInternal(inodePath)); } catch (InvalidPathException e) { throw new FileDoesNotExistException( String.format("Hydration failed for file: %s", inodePath.getUri()), e); } } fileInfo.setXAttr(inode.getXAttr()); MountTable.Resolution resolution; try { resolution = mMountTable.resolve(uri); } catch (InvalidPathException e) { throw new FileDoesNotExistException(e.getMessage(), e); } AlluxioURI resolvedUri = resolution.getUri(); fileInfo.setUfsPath(resolvedUri.toString()); fileInfo.setMountId(resolution.getMountId()); Metrics.getUfsCounter(mMountTable.getMountInfo(resolution.getMountId()).getUfsUri().toString(), Metrics.UFSOps.GET_FILE_INFO).inc(); Metrics.FILE_INFOS_GOT.inc(); return fileInfo; } @Override public PersistenceState getPersistenceState(long fileId) throws FileDoesNotExistException { try (LockedInodePath inodePath = mInodeTree.lockFullInodePath(fileId, LockPattern.READ)) { return inodePath.getInode().getPersistenceState(); } } @Override public void listStatus(AlluxioURI path, ListStatusContext context, ResultStream<FileInfo> resultStream) throws AccessControlException, FileDoesNotExistException, InvalidPathException, IOException { Metrics.GET_FILE_INFO_OPS.inc(); LockingScheme lockingScheme = new LockingScheme(path, LockPattern.READ, false); boolean ufsAccessed = false; try (RpcContext rpcContext = createRpcContext(context); FileSystemMasterAuditContext auditContext = createAuditContext("listStatus", path, null, null)) { DescendantType descendantType = context.getOptions().getRecursive() ? DescendantType.ALL : DescendantType.ONE; if (syncMetadata(rpcContext, path, context.getOptions().getCommonOptions(), descendantType, auditContext, LockedInodePath::getInodeOrNull, (inodePath, permChecker) -> permChecker.checkPermission(Mode.Bits.READ, inodePath))) { // If synced, do not load metadata. 
context.getOptions().setLoadMetadataType(LoadMetadataPType.NEVER); ufsAccessed = true; } /* See the comments in #getFileIdInternal for an explanation on why the loop here is required. */ DescendantType loadDescendantType; if (context.getOptions().getLoadMetadataType() == LoadMetadataPType.NEVER) { loadDescendantType = DescendantType.NONE; } else if (context.getOptions().getRecursive()) { loadDescendantType = DescendantType.ALL; } else { loadDescendantType = DescendantType.ONE; } // load metadata for 1 level of descendants, or all descendants if recursive LoadMetadataContext loadMetadataContext = LoadMetadataContext.mergeFrom( LoadMetadataPOptions.newBuilder().setCreateAncestors(true) .setLoadDescendantType(GrpcUtils.toProto(loadDescendantType)).setCommonOptions( FileSystemMasterCommonPOptions.newBuilder() .setTtl(context.getOptions().getCommonOptions().getTtl()) .setTtlAction(context.getOptions().getCommonOptions().getTtlAction()))); boolean loadMetadata = false; boolean run = true; while (run) { run = false; if (loadMetadata) { loadMetadataIfNotExist(rpcContext, path, loadMetadataContext, false); ufsAccessed = true; } // We just synced; the new lock pattern should not sync. try (LockedInodePath inodePath = mInodeTree.lockInodePath(lockingScheme)) { auditContext.setSrcInode(inodePath.getInodeOrNull()); try { mPermissionChecker.checkPermission(Mode.Bits.READ, inodePath); } catch (AccessControlException e) { auditContext.setAllowed(false); throw e; } if (!loadMetadata) { Inode inode; boolean isLoaded = true; if (inodePath.fullPathExists()) { inode = inodePath.getInode(); if (inode.isDirectory() && context.getOptions().getLoadMetadataType() != LoadMetadataPType.ALWAYS) { InodeDirectory inodeDirectory = inode.asDirectory(); isLoaded = inodeDirectory.isDirectChildrenLoaded(); if (context.getOptions().getRecursive()) { isLoaded = areDescendantsLoaded(inodeDirectory); } if (isLoaded) { // no need to load again. 
/**
 * Lists the status of the path in {@link LockedInodePath}, possibly recursively depending on
 * the descendantType. The result is returned via a list specified by statusList, in postorder
 * traversal order.
 *
 * @param context call context
 * @param rpcContext the context for the RPC call
 * @param currInodePath the inode path to find the status
 * @param auditContext the audit context to return any access exceptions
 * @param descendantType if the currInodePath is a directory, how many levels of its descendant
 *        should be returned
 * @param resultStream the stream to receive individual results
 * @param depth internal use field for tracking depth relative to root item
 */
private void listStatusInternal(ListStatusContext context, RpcContext rpcContext,
    LockedInodePath currInodePath, AuditContext auditContext, DescendantType descendantType,
    ResultStream<FileInfo> resultStream, int depth)
    throws FileDoesNotExistException, UnavailableException,
    AccessControlException, InvalidPathException {
  rpcContext.throwIfCancelled();
  Inode inode = currInodePath.getInode();
  if (inode.isDirectory() && descendantType != DescendantType.NONE) {
    try {
      // TODO(david): Return the error message when we do not have permission
      mPermissionChecker.checkPermission(Mode.Bits.EXECUTE, currInodePath);
    } catch (AccessControlException e) {
      auditContext.setAllowed(false);
      // For a recursive listing, an unreadable subtree is silently skipped; for a direct
      // listing the permission failure is surfaced to the caller.
      if (descendantType == DescendantType.ALL) {
        return;
      } else {
        throw e;
      }
    }
    mAccessTimeUpdater.updateAccessTime(rpcContext.getJournalContext(), inode,
        CommonUtils.getCurrentMs());
    // Only an ALL listing keeps recursing; a ONE listing stops at the direct children.
    DescendantType nextDescendantType = (descendantType == DescendantType.ALL)
        ? DescendantType.ALL : DescendantType.NONE;
    // This is to generate a parsed child path components to be passed to lockChildPath
    String [] childComponentsHint = null;
    for (Inode child : mInodeStore.getChildren(inode.asDirectory())) {
      if (childComponentsHint == null) {
        // Lazily parse the parent components once; only the last slot changes per child.
        String[] parentComponents = PathUtils.getPathComponents(currInodePath.getUri().getPath());
        childComponentsHint = new String[parentComponents.length + 1];
        System.arraycopy(parentComponents, 0, childComponentsHint, 0, parentComponents.length);
      }
      // TODO(david): Make extending InodePath more efficient
      childComponentsHint[childComponentsHint.length - 1] = child.getName();
      try (LockedInodePath childInodePath =
          currInodePath.lockChild(child, LockPattern.READ, childComponentsHint)) {
        listStatusInternal(context, rpcContext, childInodePath, auditContext, nextDescendantType,
            resultStream, depth + 1);
      } catch (InvalidPathException | FileDoesNotExistException e) {
        // A child concurrently deleted/renamed is skipped rather than failing the listing.
        LOG.debug("Path \"{}\" is invalid, has been ignored.",
            PathUtils.concatPath("/", childComponentsHint));
      }
    }
  }
  // Listing a directory should not emit item for the directory itself.
  if (depth != 0 || inode.isFile()) {
    resultStream.submit(getFileInfoInternal(currInodePath));
  }
}
* * @param loadMetadataType the {@link LoadMetadataPType} to check * @param path the path that does not exist in Alluxio namespace (used for exception message) */ private void checkLoadMetadataOptions(LoadMetadataPType loadMetadataType, AlluxioURI path) throws FileDoesNotExistException { if (loadMetadataType == LoadMetadataPType.NEVER || (loadMetadataType == LoadMetadataPType.ONCE && mUfsAbsentPathCache.isAbsent(path))) { throw new FileDoesNotExistException(ExceptionMessage.PATH_DOES_NOT_EXIST.getMessage(path)); } } private boolean areDescendantsLoaded(InodeDirectoryView inode) { if (!inode.isDirectChildrenLoaded()) { return false; } for (Inode child : mInodeStore.getChildren(inode)) { if (child.isDirectory()) { if (!areDescendantsLoaded(child.asDirectory())) { return false; } } } return true; } /** * Checks to see if the entire path exists in Alluxio. Updates the absent cache if it does not * exist. * * @param inodePath the path to ensure */ private void ensureFullPathAndUpdateCache(LockedInodePath inodePath) throws InvalidPathException, FileDoesNotExistException { boolean exists = false; try { mInodeTree.ensureFullInodePath(inodePath); exists = true; } finally { if (!exists) { mUfsAbsentPathCache.process(inodePath.getUri(), inodePath.getInodeList()); } } } @Override public FileSystemMasterView getFileSystemMasterView() { return new FileSystemMasterView(this); } @Override public void checkAccess(AlluxioURI path, CheckAccessContext context) throws FileDoesNotExistException, InvalidPathException, AccessControlException, IOException { try (RpcContext rpcContext = createRpcContext(context); FileSystemMasterAuditContext auditContext = createAuditContext("checkAccess", path, null, null)) { Mode.Bits bits = Mode.Bits.fromProto(context.getOptions().getBits()); syncMetadata(rpcContext, path, context.getOptions().getCommonOptions(), DescendantType.NONE, auditContext, LockedInodePath::getInodeOrNull, (inodePath, permChecker) -> permChecker.checkPermission(bits, inodePath) ); 
/**
 * Depth-first walk over the subtree rooted at the given path, collecting the URIs of all
 * inodes that are inconsistent with the under storage (as judged by
 * {@code checkConsistencyInternal}).
 *
 * @param inodePath the (read-locked) root of the subtree to check
 * @param inconsistentUris accumulator receiving the URIs found to be inconsistent
 */
private void checkConsistencyRecursive(LockedInodePath inodePath,
    List<AlluxioURI> inconsistentUris) throws IOException, FileDoesNotExistException {
  Inode inode = inodePath.getInode();
  try {
    if (!checkConsistencyInternal(inodePath)) {
      inconsistentUris.add(inodePath.getUri());
    }
    if (inode.isDirectory()) {
      InodeDirectory inodeDir = inode.asDirectory();
      for (Inode child : mInodeStore.getChildren(inodeDir)) {
        // Lock each child before recursing so the inode stays stable while inspected.
        try (LockedInodePath childPath = inodePath.lockChild(child, LockPattern.READ)) {
          checkConsistencyRecursive(childPath, inconsistentUris);
        }
      }
    }
  } catch (InvalidPathException e) {
    // Paths invalidated concurrently (deleted/renamed) are skipped, not treated as errors.
    LOG.debug("Path \"{}\" is invalid, has been ignored.",
        PathUtils.concatPath(inodePath.getUri().getPath()));
  }
}
if (inode.isDirectory()) { return ufsStatus.isDirectory(); } else { String ufsFingerprint = Fingerprint.create(ufs.getUnderFSType(), ufsStatus).serialize(); return ufsStatus.isFile() && (ufsFingerprint.equals(inode.asFile().getUfsFingerprint())); } } } @Override public void completeFile(AlluxioURI path, CompleteFileContext context) throws BlockInfoException, FileDoesNotExistException, InvalidPathException, InvalidFileSizeException, FileAlreadyCompletedException, AccessControlException, UnavailableException { Metrics.COMPLETE_FILE_OPS.inc(); // No need to syncMetadata before complete. try (RpcContext rpcContext = createRpcContext(context); LockedInodePath inodePath = mInodeTree.lockFullInodePath(path, LockPattern.WRITE_INODE); FileSystemMasterAuditContext auditContext = createAuditContext("completeFile", path, null, inodePath.getInodeOrNull())) { try { mPermissionChecker.checkPermission(Mode.Bits.WRITE, inodePath); } catch (AccessControlException e) { auditContext.setAllowed(false); throw e; } // Even readonly mount points should be able to complete a file, for UFS reads in CACHE mode. completeFileInternal(rpcContext, inodePath, context); // Schedule async persistence if requested. if (context.getOptions().hasAsyncPersistOptions()) { scheduleAsyncPersistenceInternal(inodePath, ScheduleAsyncPersistenceContext .create(context.getOptions().getAsyncPersistOptionsBuilder()), rpcContext); } auditContext.setSucceeded(true); } } /** * Completes a file. After a file is completed, it cannot be written to. 
* * @param rpcContext the rpc context * @param inodePath the {@link LockedInodePath} to complete * @param context the method context */ void completeFileInternal(RpcContext rpcContext, LockedInodePath inodePath, CompleteFileContext context) throws InvalidPathException, FileDoesNotExistException, BlockInfoException, FileAlreadyCompletedException, InvalidFileSizeException, UnavailableException { Inode inode = inodePath.getInode(); if (!inode.isFile()) { throw new FileDoesNotExistException( ExceptionMessage.PATH_MUST_BE_FILE.getMessage(inodePath.getUri())); } InodeFile fileInode = inode.asFile(); List<Long> blockIdList = fileInode.getBlockIds(); List<BlockInfo> blockInfoList = mBlockMaster.getBlockInfoList(blockIdList); if (!fileInode.isPersisted() && blockInfoList.size() != blockIdList.size()) { throw new BlockInfoException("Cannot complete a file without all the blocks committed"); } // Iterate over all file blocks committed to Alluxio, computing the length and verify that all // the blocks (except the last one) is the same size as the file block size. long inAlluxioLength = 0; long fileBlockSize = fileInode.getBlockSizeBytes(); for (int i = 0; i < blockInfoList.size(); i++) { BlockInfo blockInfo = blockInfoList.get(i); inAlluxioLength += blockInfo.getLength(); if (i < blockInfoList.size() - 1 && blockInfo.getLength() != fileBlockSize) { throw new BlockInfoException( "Block index " + i + " has a block size smaller than the file block size (" + fileInode .getBlockSizeBytes() + ")"); } } // If the file is persisted, its length is determined by UFS. Otherwise, its length is // determined by its size in Alluxio. long length = fileInode.isPersisted() ? context.getOptions().getUfsLength() : inAlluxioLength; String ufsFingerprint = Constants.INVALID_UFS_FINGERPRINT; if (fileInode.isPersisted()) { UfsStatus ufsStatus = context.getUfsStatus(); // Retrieve the UFS fingerprint for this file. 
MountTable.Resolution resolution = mMountTable.resolve(inodePath.getUri()); AlluxioURI resolvedUri = resolution.getUri(); try (CloseableResource<UnderFileSystem> ufsResource = resolution.acquireUfsResource()) { UnderFileSystem ufs = ufsResource.get(); if (ufsStatus == null) { ufsFingerprint = ufs.getFingerprint(resolvedUri.toString()); } else { ufsFingerprint = Fingerprint.create(ufs.getUnderFSType(), ufsStatus).serialize(); } } } completeFileInternal(rpcContext, inodePath, length, context.getOperationTimeMs(), ufsFingerprint); } /** * @param rpcContext the rpc context * @param inodePath the {@link LockedInodePath} to complete * @param length the length to use * @param opTimeMs the operation time (in milliseconds) * @param ufsFingerprint the ufs fingerprint */ private void completeFileInternal(RpcContext rpcContext, LockedInodePath inodePath, long length, long opTimeMs, String ufsFingerprint) throws FileDoesNotExistException, InvalidPathException, InvalidFileSizeException, FileAlreadyCompletedException, UnavailableException { Preconditions.checkState(inodePath.getLockPattern().isWrite()); InodeFile inode = inodePath.getInodeFile(); if (inode.isCompleted() && inode.getLength() != Constants.UNKNOWN_SIZE) { throw new FileAlreadyCompletedException("File " + getName() + " has already been completed."); } if (length < 0 && length != Constants.UNKNOWN_SIZE) { throw new InvalidFileSizeException( "File " + inode.getName() + " cannot have negative length: " + length); } Builder entry = UpdateInodeFileEntry.newBuilder() .setId(inode.getId()) .setPath(inodePath.getUri().getPath()) .setCompleted(true) .setLength(length); if (length == Constants.UNKNOWN_SIZE) { // TODO(gpang): allow unknown files to be multiple blocks. // If the length of the file is unknown, only allow 1 block to the file. 
length = inode.getBlockSizeBytes(); } int sequenceNumber = 0; long remainingBytes = length; while (remainingBytes > 0) { entry.addSetBlocks(BlockId.createBlockId(inode.getBlockContainerId(), sequenceNumber)); remainingBytes -= Math.min(remainingBytes, inode.getBlockSizeBytes()); sequenceNumber++; } if (inode.isPersisted()) { // Commit all the file blocks (without locations) so the metadata for the block exists. commitBlockInfosForFile(entry.getSetBlocksList(), length, inode.getBlockSizeBytes()); // The path exists in UFS, so it is no longer absent mUfsAbsentPathCache.processExisting(inodePath.getUri()); } // We could introduce a concept of composite entries, so that these two entries could // be applied in a single call to applyAndJournal. mInodeTree.updateInode(rpcContext, UpdateInodeEntry.newBuilder() .setId(inode.getId()) .setUfsFingerprint(ufsFingerprint) .setLastModificationTimeMs(opTimeMs) .setLastAccessTimeMs(opTimeMs) .setOverwriteModificationTime(true) .build()); mInodeTree.updateInodeFile(rpcContext, entry.build()); Metrics.FILES_COMPLETED.inc(); } /** * Commits blocks to BlockMaster for given block list. 
 * @param blockIds the list of block ids
 * @param fileLength length of the file in bytes
 * @param blockSize the block size in bytes
 */
private void commitBlockInfosForFile(List<Long> blockIds, long fileLength, long blockSize)
    throws UnavailableException {
  long currLength = fileLength;
  for (long blockId : blockIds) {
    // Every block is full-size except possibly the last one, which holds the remainder.
    long currentBlockSize = Math.min(currLength, blockSize);
    mBlockMaster.commitBlockInUFS(blockId, currentBlockSize);
    currLength -= currentBlockSize;
  }
}

@Override
public FileInfo createFile(AlluxioURI path, CreateFileContext context)
    throws AccessControlException, InvalidPathException, FileAlreadyExistsException,
    BlockInfoException, IOException, FileDoesNotExistException {
  Metrics.CREATE_FILES_OPS.inc();
  try (RpcContext rpcContext = createRpcContext(context);
      FileSystemMasterAuditContext auditContext =
          createAuditContext("createFile", path, null, null)) {
    // Sync metadata first; the audit inode is the deepest existing ancestor when creating
    // recursively, otherwise the direct parent.
    syncMetadata(rpcContext, path, context.getOptions().getCommonOptions(), DescendantType.ONE,
        auditContext,
        (inodePath) -> context.getOptions().getRecursive()
            ? inodePath.getLastExistingInode() : inodePath.getParentInodeOrNull(),
        (inodePath, permChecker) -> permChecker
            .checkParentPermission(Mode.Bits.WRITE, inodePath));
    LockingScheme lockingScheme =
        createLockingScheme(path, context.getOptions().getCommonOptions(),
            LockPattern.WRITE_EDGE);
    try (LockedInodePath inodePath = mInodeTree.lockInodePath(lockingScheme)) {
      auditContext.setSrcInode(inodePath.getParentInodeOrNull());
      if (context.getOptions().getRecursive()) {
        auditContext.setSrcInode(inodePath.getLastExistingInode());
      }
      try {
        mPermissionChecker.checkParentPermission(Mode.Bits.WRITE, inodePath);
      } catch (AccessControlException e) {
        auditContext.setAllowed(false);
        throw e;
      }
      mMountTable.checkUnderWritableMountPoint(path);
      if (context.isPersisted()) {
        // Check if ufs is writable
        checkUfsMode(path, OperationType.WRITE);
      }
      createFileInternal(rpcContext, inodePath, context);
      auditContext.setSrcInode(inodePath.getInode()).setSucceeded(true);
      return getFileInfoInternal(inodePath);
    }
  }
}

/**
 * Creates the file inode(s) for the given locked path and updates the UFS absent-path cache
 * and creation metrics.
 *
 * @param rpcContext the rpc context
 * @param inodePath the path to be created
 * @param context the method context
 * @return the list of created inodes
 */
List<Inode> createFileInternal(RpcContext rpcContext, LockedInodePath inodePath,
    CreateFileContext context)
    throws InvalidPathException, FileAlreadyExistsException, BlockInfoException, IOException,
    FileDoesNotExistException {
  if (mWhitelist.inList(inodePath.getUri().toString())) {
    context.setCacheable(true);
  }
  // If the create succeeded, the list of created inodes will not be empty.
  List<Inode> created = mInodeTree.createPath(rpcContext, inodePath, context);
  if (context.isPersisted()) {
    // The path exists in UFS, so it is no longer absent. The ancestors exist in UFS, but the
    // actual file does not exist in UFS yet.
    mUfsAbsentPathCache.processExisting(inodePath.getUri().getParent());
  } else {
    MountTable.Resolution resolution = mMountTable.resolve(inodePath.getUri());
    Metrics.getUfsCounter(mMountTable.getMountInfo(resolution.getMountId())
        .getUfsUri().toString(), Metrics.UFSOps.CREATE_FILE).inc();
  }
  Metrics.FILES_CREATED.inc();
  return created;
}

@Override
public long getNewBlockIdForFile(AlluxioURI path)
    throws FileDoesNotExistException, InvalidPathException, AccessControlException,
    UnavailableException {
  Metrics.GET_NEW_BLOCK_OPS.inc();
  try (RpcContext rpcContext = createRpcContext();
      LockedInodePath inodePath = mInodeTree.lockFullInodePath(path, LockPattern.WRITE_INODE);
      FileSystemMasterAuditContext auditContext =
          createAuditContext("getNewBlockIdForFile", path, null, inodePath.getInodeOrNull())) {
    try {
      mPermissionChecker.checkPermission(Mode.Bits.WRITE, inodePath);
    } catch (AccessControlException e) {
      auditContext.setAllowed(false);
      throw e;
    }
    Metrics.NEW_BLOCKS_GOT.inc();
    // Journal the new block allocation against this file's inode.
    long blockId = mInodeTree.newBlock(rpcContext, NewBlockEntry.newBuilder()
        .setId(inodePath.getInode().getId())
        .build());
    auditContext.setSucceeded(true);
    return blockId;
  }
}

@Override
public Map<String, MountPointInfo> getMountPointInfoSummary() {
  // TreeMap keeps the mount points sorted by path for display.
  SortedMap<String, MountPointInfo> mountPoints = new TreeMap<>();
  for (Map.Entry<String, MountInfo> mountPoint : mMountTable.getMountTable().entrySet()) {
    mountPoints.put(mountPoint.getKey(), getDisplayMountPointInfo(mountPoint.getValue()));
  }
  return mountPoints;
}

@Override
public MountPointInfo getDisplayMountPointInfo(AlluxioURI path) throws InvalidPathException {
  if (!mMountTable.isMountPoint(path)) {
    throw new InvalidPathException(
        ExceptionMessage.PATH_MUST_BE_MOUNT_POINT.getMessage(path));
  }
  return getDisplayMountPointInfo(mMountTable.getMountTable().get(path.toString()));
}

/**
 * Gets the mount point information for display from a mount information.
 * @param mountInfo the mount information to transform
 * @return the mount point information
 */
private MountPointInfo getDisplayMountPointInfo(MountInfo mountInfo) {
  MountPointInfo info = mountInfo.toDisplayMountPointInfo();
  try (CloseableResource<UnderFileSystem> ufsResource =
      mUfsManager.get(mountInfo.getMountId()).acquireUfsResource()) {
    UnderFileSystem ufs = ufsResource.get();
    info.setUfsType(ufs.getUnderFSType());
    // Capacity/usage lookups are best-effort: a UFS I/O failure leaves the field unset
    // rather than failing the whole summary.
    try {
      info.setUfsCapacityBytes(
          ufs.getSpace(info.getUfsUri(), UnderFileSystem.SpaceType.SPACE_TOTAL));
    } catch (IOException e) {
      LOG.warn("Cannot get total capacity of {}", info.getUfsUri(), e);
    }
    try {
      info.setUfsUsedBytes(
          ufs.getSpace(info.getUfsUri(), UnderFileSystem.SpaceType.SPACE_USED));
    } catch (IOException e) {
      LOG.warn("Cannot get used capacity of {}", info.getUfsUri(), e);
    }
  } catch (UnavailableException | NotFoundException e) {
    // We should never reach here
    LOG.error("No UFS cached for {}", info, e);
  }
  return info;
}

@Override
public long getInodeCount() {
  return mInodeTree.getInodeCount();
}

@Override
public int getNumberOfPinnedFiles() {
  return mInodeTree.getPinnedSize();
}

@Override
public void delete(AlluxioURI path, DeleteContext context)
    throws IOException, FileDoesNotExistException, DirectoryNotEmptyException,
    InvalidPathException, AccessControlException {
  Metrics.DELETE_PATHS_OPS.inc();
  try (RpcContext rpcContext = createRpcContext(context);
      FileSystemMasterAuditContext auditContext =
          createAuditContext("delete", path, null, null)) {
    // Sync the whole subtree for a recursive delete, otherwise just the path itself.
    syncMetadata(rpcContext, path, context.getOptions().getCommonOptions(),
        context.getOptions().getRecursive() ? DescendantType.ALL : DescendantType.ONE,
        auditContext,
        LockedInodePath::getInodeOrNull,
        (inodePath, permChecker) -> permChecker.checkParentPermission(Mode.Bits.WRITE, inodePath)
    );
    LockingScheme lockingScheme =
        createLockingScheme(path, context.getOptions().getCommonOptions(),
            LockPattern.WRITE_EDGE);
    try (LockedInodePath inodePath = mInodeTree
        .lockInodePath(lockingScheme)) {
      mPermissionChecker.checkParentPermission(Mode.Bits.WRITE, inodePath);
      if (context.getOptions().getRecursive()) {
        // Pre-check write permission on every descendant; collect all failures so the
        // error reports the full set instead of only the first one.
        List<String> failedChildren = new ArrayList<>();
        try (LockedInodePathList descendants = mInodeTree.getDescendants(inodePath)) {
          for (LockedInodePath childPath : descendants) {
            try {
              mPermissionChecker.checkPermission(Mode.Bits.WRITE, childPath);
              if (mMountTable.isMountPoint(childPath.getUri())) {
                mMountTable.checkUnderWritableMountPoint(childPath.getUri());
              }
            } catch (AccessControlException e) {
              failedChildren.add(e.getMessage());
            }
          }
          if (failedChildren.size() > 0) {
            throw new AccessControlException(ExceptionMessage.DELETE_FAILED_DIR_CHILDREN
                .getMessage(path, StringUtils.join(failedChildren, ",")));
          }
        } catch (AccessControlException e) {
          auditContext.setAllowed(false);
          throw e;
        }
      }
      mMountTable.checkUnderWritableMountPoint(path);
      if (!inodePath.fullPathExists()) {
        throw new FileDoesNotExistException(ExceptionMessage.PATH_DOES_NOT_EXIST
            .getMessage(path));
      }
      deleteInternal(rpcContext, inodePath, context);
      auditContext.setSucceeded(true);
    }
  }
}

/**
 * Implements file deletion.
 * <p>
 * This method does not delete blocks. Instead, it returns deleted inodes so that their blocks can
 * be deleted after the inode deletion journal entry has been written. We cannot delete blocks
 * earlier because the inode deletion may fail, leaving us with inode containing deleted blocks.
 * @param rpcContext the rpc context
 * @param inodePath the file {@link LockedInodePath}
 * @param deleteContext the method options
 */
@VisibleForTesting
public void deleteInternal(RpcContext rpcContext, LockedInodePath inodePath,
    DeleteContext deleteContext) throws FileDoesNotExistException, IOException,
    DirectoryNotEmptyException, InvalidPathException {
  Preconditions.checkState(inodePath.getLockPattern() == LockPattern.WRITE_EDGE);
  // TODO(jiri): A crash after any UFS object is deleted and before the delete operation is
  // journaled will result in an inconsistency between Alluxio and UFS.
  if (!inodePath.fullPathExists()) {
    return;
  }
  long opTimeMs = System.currentTimeMillis();
  Inode inode = inodePath.getInode();
  if (inode == null) {
    return;
  }
  boolean recursive = deleteContext.getOptions().getRecursive();
  if (inode.isDirectory() && !recursive && mInodeStore.hasChildren(inode.asDirectory())) {
    // inode is nonempty, and we don't want to delete a nonempty directory unless recursive is
    // true
    throw new DirectoryNotEmptyException(ExceptionMessage.DELETE_NONEMPTY_DIRECTORY_NONRECURSIVE,
        inode.getName());
  }
  if (mInodeTree.isRootId(inode.getId())) {
    // The root cannot be deleted.
    throw new InvalidPathException(ExceptionMessage.DELETE_ROOT_DIRECTORY.getMessage());
  }
  // Inodes for which deletion will be attempted
  List<Pair<AlluxioURI, LockedInodePath>> inodesToDelete = new ArrayList<>();
  // Add root of sub-tree to delete
  inodesToDelete.add(new Pair<>(inodePath.getUri(), inodePath));
  try (LockedInodePathList descendants = mInodeTree.getDescendants(inodePath)) {
    for (LockedInodePath childPath : descendants) {
      inodesToDelete.add(new Pair<>(mInodeTree.getPath(childPath.getInode()), childPath));
    }
    // Prepare to delete persisted inodes
    UfsDeleter ufsDeleter = NoopUfsDeleter.INSTANCE;
    if (!deleteContext.getOptions().getAlluxioOnly()) {
      ufsDeleter = new SafeUfsDeleter(mMountTable, mInodeStore, inodesToDelete,
          deleteContext.getOptions().build());
    }
    // Inodes to delete from tree after attempting to delete from UFS
    List<Pair<AlluxioURI, LockedInodePath>> revisedInodesToDelete = new ArrayList<>();
    // Inodes that are not safe for recursive deletes
    Set<Long> unsafeInodes = new HashSet<>();
    // Alluxio URIs (and the reason for failure) which could not be deleted
    List<Pair<String, String>> failedUris = new ArrayList<>();
    // We go through each inode, removing it from its parent set and from mDelInodes. If it's a
    // file, we deal with the checkpoints and blocks as well.
    // Iterating in reverse visits children before their parents, so a directory is only
    // deleted after all of its descendants have been handled.
    for (int i = inodesToDelete.size() - 1; i >= 0; i--) {
      rpcContext.throwIfCancelled();
      Pair<AlluxioURI, LockedInodePath> inodePairToDelete = inodesToDelete.get(i);
      AlluxioURI alluxioUriToDelete = inodePairToDelete.getFirst();
      Inode inodeToDelete = inodePairToDelete.getSecond().getInode();
      String failureReason = null;
      if (unsafeInodes.contains(inodeToDelete.getId())) {
        failureReason = ExceptionMessage.DELETE_FAILED_DIR_NONEMPTY.getMessage();
      } else if (inodeToDelete.isPersisted()) {
        // If this is a mount point, we have deleted all the children and can unmount it
        // TODO(calvin): Add tests (ALLUXIO-1831)
        if (mMountTable.isMountPoint(alluxioUriToDelete)) {
          mMountTable.delete(rpcContext, alluxioUriToDelete, true);
        } else {
          if (!deleteContext.getOptions().getAlluxioOnly()) {
            try {
              checkUfsMode(alluxioUriToDelete, OperationType.WRITE);
              // Attempt to delete node if all children were deleted successfully
              ufsDeleter.delete(alluxioUriToDelete, inodeToDelete);
            } catch (AccessControlException e) {
              // In case ufs is not writable, we will still attempt to delete other entries
              // if any as they may be from a different mount point
              LOG.warn(e.getMessage());
              failureReason = e.getMessage();
            } catch (IOException e) {
              LOG.warn(e.getMessage());
              failureReason = e.getMessage();
            }
          }
        }
      }
      if (failureReason == null) {
        if (inodeToDelete.isFile()) {
          long fileId = inodeToDelete.getId();
          // Remove the file from the set of files to persist.
          mPersistRequests.remove(fileId);
          // Cancel any ongoing jobs.
          PersistJob job = mPersistJobs.get(fileId);
          if (job != null) {
            job.setCancelState(PersistJob.CancelState.TO_BE_CANCELED);
          }
        }
        revisedInodesToDelete.add(new Pair<>(alluxioUriToDelete, inodePairToDelete.getSecond()));
      } else {
        unsafeInodes.add(inodeToDelete.getId());
        // Propagate 'unsafe-ness' to parent as one of its descendants can't be deleted
        unsafeInodes.add(inodeToDelete.getParentId());
        failedUris.add(new Pair<>(alluxioUriToDelete.toString(), failureReason));
      }
    }
    if (mSyncManager.isSyncPoint(inodePath.getUri())) {
      mSyncManager.stopSyncAndJournal(RpcContext.NOOP, inodePath.getUri());
    }
    // Delete Inodes
    for (Pair<AlluxioURI, LockedInodePath> delInodePair : revisedInodesToDelete) {
      LockedInodePath tempInodePath = delInodePair.getSecond();
      MountTable.Resolution resolution = mMountTable.resolve(tempInodePath.getUri());
      mInodeTree.deleteInode(rpcContext, tempInodePath, opTimeMs);
      if (deleteContext.getOptions().getAlluxioOnly()) {
        Metrics.getUfsCounter(mMountTable.getMountInfo(resolution.getMountId())
            .getUfsUri().toString(), Metrics.UFSOps.DELETE_FILE).inc();
      }
    }
    if (!failedUris.isEmpty()) {
      // Summarize every failed URI with its reason in a single exception message.
      Collection<String> messages = failedUris.stream()
          .map(pair -> String.format("%s (%s)", pair.getFirst(), pair.getSecond()))
          .collect(Collectors.toList());
      throw new FailedPreconditionException(
          ExceptionMessage.DELETE_FAILED_UFS.getMessage(StringUtils.join(messages, ", ")));
    }
  }
  Metrics.PATHS_DELETED.inc(inodesToDelete.size());
}

@Override
public List<FileBlockInfo> getFileBlockInfoList(AlluxioURI path)
    throws FileDoesNotExistException, InvalidPathException, AccessControlException,
    UnavailableException {
  Metrics.GET_FILE_BLOCK_INFO_OPS.inc();
  try (LockedInodePath inodePath = mInodeTree.lockFullInodePath(path, LockPattern.READ);
      FileSystemMasterAuditContext auditContext =
          createAuditContext("getFileBlockInfoList", path, null, inodePath.getInodeOrNull())) {
    try {
      mPermissionChecker.checkPermission(Mode.Bits.READ, inodePath);
    } catch (AccessControlException e) {
      auditContext.setAllowed(false);
      throw e;
    }
    List<FileBlockInfo> ret = getFileBlockInfoListInternal(inodePath);
    Metrics.FILE_BLOCK_INFOS_GOT.inc();
    auditContext.setSucceeded(true);
    return ret;
  }
}

/**
 * Builds the {@link FileBlockInfo} list for every block of the file at the given locked path.
 *
 * @param inodePath the {@link LockedInodePath} to get the info for
 * @return a list of {@link FileBlockInfo} for all the blocks of the given inode
 */
private List<FileBlockInfo> getFileBlockInfoListInternal(LockedInodePath inodePath)
    throws InvalidPathException, FileDoesNotExistException, UnavailableException {
  InodeFile file = inodePath.getInodeFile();
  List<BlockInfo> blockInfoList = mBlockMaster.getBlockInfoList(file.getBlockIds());
  List<FileBlockInfo> ret = new ArrayList<>();
  for (BlockInfo blockInfo : blockInfoList) {
    ret.add(generateFileBlockInfo(inodePath, blockInfo));
  }
  return ret;
}

/**
 * Generates a {@link FileBlockInfo} object from internal metadata. This adds file information to
 * the block, such as the file offset, and additional UFS locations for the block.
 *
 * @param inodePath the file the block is a part of
 * @param blockInfo the {@link BlockInfo} to generate the {@link FileBlockInfo} from
 * @return a new {@link FileBlockInfo} for the block
 */
private FileBlockInfo generateFileBlockInfo(LockedInodePath inodePath, BlockInfo blockInfo)
    throws FileDoesNotExistException {
  InodeFile file = inodePath.getInodeFile();
  FileBlockInfo fileBlockInfo = new FileBlockInfo();
  fileBlockInfo.setBlockInfo(blockInfo);
  fileBlockInfo.setUfsLocations(new ArrayList<>());
  // The sequence number part of the block id is the block index.
  long offset = file.getBlockSizeBytes() * BlockId.getSequenceNumber(blockInfo.getBlockId());
  fileBlockInfo.setOffset(offset);
  if (fileBlockInfo.getBlockInfo().getLocations().isEmpty() && file.isPersisted()) {
    // No alluxio locations, but there is a checkpoint in the under storage system. Add the
    // locations from the under storage system.
    long blockId = fileBlockInfo.getBlockInfo().getBlockId();
    List<String> locations = mUfsBlockLocationCache.get(blockId, inodePath.getUri(),
        fileBlockInfo.getOffset());
    if (locations != null) {
      fileBlockInfo.setUfsLocations(locations);
    }
  }
  return fileBlockInfo;
}

/**
 * Returns whether the inodeFile is fully in Alluxio or not. The file is fully in Alluxio only if
 * all the blocks of the file are in Alluxio, in other words, the in-Alluxio percentage is 100.
 *
 * @return true if the file is fully in Alluxio, false otherwise
 */
private boolean isFullyInAlluxio(InodeFile inode) throws UnavailableException {
  return getInAlluxioPercentage(inode) == 100;
}

/**
 * Returns whether the inodeFile is fully in memory or not. The file is fully in memory only if
 * all the blocks of the file are in memory, in other words, the in-memory percentage is 100.
 *
 * @return true if the file is fully in memory, false otherwise
 */
private boolean isFullyInMemory(InodeFile inode) throws UnavailableException {
  return getInMemoryPercentage(inode) == 100;
}

@Override
public List<AlluxioURI> getInAlluxioFiles() throws UnavailableException {
  List<AlluxioURI> files = new ArrayList<>();
  LockedInodePath rootPath;
  try {
    rootPath =
        mInodeTree.lockFullInodePath(new AlluxioURI(AlluxioURI.SEPARATOR), LockPattern.READ);
  } catch (FileDoesNotExistException | InvalidPathException e) {
    // Root should always exist.
    throw new RuntimeException(e);
  }
  try (LockedInodePath inodePath = rootPath) {
    getInAlluxioFilesInternal(inodePath, files);
  }
  return files;
}

@Override
public List<AlluxioURI> getInMemoryFiles() throws UnavailableException {
  List<AlluxioURI> files = new ArrayList<>();
  LockedInodePath rootPath;
  try {
    rootPath =
        mInodeTree.lockFullInodePath(new AlluxioURI(AlluxioURI.SEPARATOR), LockPattern.READ);
  } catch (FileDoesNotExistException | InvalidPathException e) {
    // Root should always exist.
    throw new RuntimeException(e);
  }
  try (LockedInodePath inodePath = rootPath) {
    getInMemoryFilesInternal(inodePath, files);
  }
  return files;
}

/**
 * Adds in-Alluxio files to the array list passed in. This method assumes the inode passed in is
 * already read locked.
 *
 * @param inodePath the inode path to search
 * @param files the list to accumulate the results in
 */
private void getInAlluxioFilesInternal(LockedInodePath inodePath, List<AlluxioURI> files)
    throws UnavailableException {
  Inode inode = inodePath.getInodeOrNull();
  if (inode == null) {
    return;
  }
  if (inode.isFile()) {
    if (isFullyInAlluxio(inode.asFile())) {
      files.add(inodePath.getUri());
    }
  } else {
    // This inode is a directory.
    for (Inode child : mInodeStore.getChildren(inode.asDirectory())) {
      try (LockedInodePath childPath = inodePath.lockChild(child, LockPattern.READ)) {
        getInAlluxioFilesInternal(childPath, files);
      } catch (InvalidPathException e) {
        // Inode is no longer a child, continue.
        continue;
      }
    }
  }
}

/**
 * Adds in-memory files to the array list passed in. This method assumes the inode passed in is
 * already read locked.
 *
 * @param inodePath the inode path to search
 * @param files the list to accumulate the results in
 */
private void getInMemoryFilesInternal(LockedInodePath inodePath, List<AlluxioURI> files)
    throws UnavailableException {
  Inode inode = inodePath.getInodeOrNull();
  if (inode == null) {
    return;
  }
  if (inode.isFile()) {
    if (isFullyInMemory(inode.asFile())) {
      files.add(inodePath.getUri());
    }
  } else {
    // This inode is a directory.
    for (Inode child : mInodeStore.getChildren(inode.asDirectory())) {
      try (LockedInodePath childPath = inodePath.lockChild(child, LockPattern.READ)) {
        getInMemoryFilesInternal(childPath, files);
      } catch (InvalidPathException e) {
        // Inode is no longer a child, continue.
        continue;
      }
    }
  }
}

/**
 * Gets the in-memory percentage of an Inode. For a file that has all blocks in memory, it
 * returns 100; for a file that has no block in memory, it returns 0.
 Returns 0 for a directory.
 *
 * @param inode the inode
 * @return the in memory percentage
 */
private int getInMemoryPercentage(Inode inode) throws UnavailableException {
  if (!inode.isFile()) {
    return 0;
  }
  InodeFile inodeFile = inode.asFile();
  long length = inodeFile.getLength();
  if (length == 0) {
    // An empty file is trivially 100% in memory.
    return 100;
  }
  long inMemoryLength = 0;
  for (BlockInfo info : mBlockMaster.getBlockInfoList(inodeFile.getBlockIds())) {
    if (isInTopStorageTier(info)) {
      inMemoryLength += info.getLength();
    }
  }
  return (int) (inMemoryLength * 100 / length);
}

/**
 * Gets the in-Alluxio percentage of an Inode. For a file that has all blocks in Alluxio, it
 * returns 100; for a file that has no block in Alluxio, it returns 0. Returns 0 for a directory.
 *
 * @param inode the inode
 * @return the in alluxio percentage
 */
private int getInAlluxioPercentage(Inode inode) throws UnavailableException {
  if (!inode.isFile()) {
    return 0;
  }
  InodeFile inodeFile = inode.asFile();
  long length = inodeFile.getLength();
  if (length == 0) {
    // An empty file is trivially 100% in Alluxio.
    return 100;
  }
  long inAlluxioLength = 0;
  for (BlockInfo info : mBlockMaster.getBlockInfoList(inodeFile.getBlockIds())) {
    if (!info.getLocations().isEmpty()) {
      inAlluxioLength += info.getLength();
    }
  }
  return (int) (inAlluxioLength * 100 / length);
}

/**
 * @return true if the given block is in the top storage level in some worker, false otherwise
 */
private boolean isInTopStorageTier(BlockInfo blockInfo) {
  for (BlockLocation location : blockInfo.getLocations()) {
    // Ordinal 0 is the top tier of the global storage tier ordering.
    if (mBlockMaster.getGlobalStorageTierAssoc().getOrdinal(location.getTierAlias()) == 0) {
      return true;
    }
  }
  return false;
}

@Override
public long createDirectory(AlluxioURI path, CreateDirectoryContext context)
    throws InvalidPathException, FileAlreadyExistsException, IOException, AccessControlException,
    FileDoesNotExistException {
  Metrics.CREATE_DIRECTORIES_OPS.inc();
  try (RpcContext rpcContext = createRpcContext(context);
      FileSystemMasterAuditContext auditContext =
          createAuditContext("mkdir", path, null, null)) {
    // Audit inode is the deepest existing ancestor when creating recursively, otherwise the
    // direct parent.
    syncMetadata(rpcContext, path, context.getOptions().getCommonOptions(), DescendantType.ONE,
        auditContext,
        inodePath -> context.getOptions().getRecursive()
            ? inodePath.getLastExistingInode() : inodePath.getParentInodeOrNull(),
        (inodePath, permChecker) -> permChecker.checkParentPermission(Mode.Bits.WRITE, inodePath)
    );
    LockingScheme lockingScheme =
        createLockingScheme(path, context.getOptions().getCommonOptions(),
            LockPattern.WRITE_EDGE);
    try (LockedInodePath inodePath = mInodeTree.lockInodePath(lockingScheme)) {
      auditContext.setSrcInode(inodePath.getParentInodeOrNull());
      if (context.getOptions().getRecursive()) {
        auditContext.setSrcInode(inodePath.getLastExistingInode());
      }
      try {
        mPermissionChecker.checkParentPermission(Mode.Bits.WRITE, inodePath);
      } catch (AccessControlException e) {
        auditContext.setAllowed(false);
        throw e;
      }
      mMountTable.checkUnderWritableMountPoint(path);
      if (context.isPersisted()) {
        checkUfsMode(path, OperationType.WRITE);
      }
      createDirectoryInternal(rpcContext, inodePath, context);
      auditContext.setSrcInode(inodePath.getInode()).setSucceeded(true);
      return inodePath.getInode().getId();
    }
  }
}

/**
 * Implementation of directory creation for a given path.
 *
 * @param rpcContext the rpc context
 * @param inodePath the path of the directory
 * @param context method context
 * @return a list of created inodes
 */
List<Inode> createDirectoryInternal(RpcContext rpcContext, LockedInodePath inodePath,
    CreateDirectoryContext context) throws InvalidPathException, FileAlreadyExistsException,
    IOException, FileDoesNotExistException {
  Preconditions.checkState(inodePath.getLockPattern() == LockPattern.WRITE_EDGE);
  try {
    List<Inode> createResult = mInodeTree.createPath(rpcContext, inodePath, context);
    InodeDirectory inodeDirectory = inodePath.getInode().asDirectory();
    String ufsFingerprint = Constants.INVALID_UFS_FINGERPRINT;
    if (inodeDirectory.isPersisted()) {
      UfsStatus ufsStatus = context.getUfsStatus();
      // Retrieve the UFS fingerprint for this file.
      MountTable.Resolution resolution = mMountTable.resolve(inodePath.getUri());
      AlluxioURI resolvedUri = resolution.getUri();
      try (CloseableResource<UnderFileSystem> ufsResource = resolution.acquireUfsResource()) {
        UnderFileSystem ufs = ufsResource.get();
        if (ufsStatus == null) {
          ufsFingerprint = ufs.getFingerprint(resolvedUri.toString());
        } else {
          ufsFingerprint = Fingerprint.create(ufs.getUnderFSType(), ufsStatus).serialize();
        }
      }
    }
    mInodeTree.updateInode(rpcContext, UpdateInodeEntry.newBuilder()
        .setId(inodeDirectory.getId())
        .setUfsFingerprint(ufsFingerprint)
        .build());
    if (context.isPersisted()) {
      // The path exists in UFS, so it is no longer absent.
      mUfsAbsentPathCache.processExisting(inodePath.getUri());
    }
    Metrics.DIRECTORIES_CREATED.inc();
    return createResult;
  } catch (BlockInfoException e) {
    // Since we are creating a directory, the block size is ignored, no such exception should
    // happen.
    throw new RuntimeException(e);
  }
}

@Override
public void rename(AlluxioURI srcPath, AlluxioURI dstPath, RenameContext context)
    throws FileAlreadyExistsException, FileDoesNotExistException, InvalidPathException,
    IOException, AccessControlException {
  Metrics.RENAME_PATH_OPS.inc();
  try (RpcContext rpcContext = createRpcContext(context);
      FileSystemMasterAuditContext auditContext =
          createAuditContext("rename", srcPath, dstPath, null)) {
    // Sync metadata for both endpoints before taking the rename locks.
    syncMetadata(rpcContext, srcPath, context.getOptions().getCommonOptions(),
        DescendantType.ONE,
        auditContext,
        LockedInodePath::getParentInodeOrNull,
        (inodePath, permChecker) -> permChecker.checkParentPermission(Mode.Bits.WRITE, inodePath)
    );
    syncMetadata(rpcContext, dstPath, context.getOptions().getCommonOptions(),
        DescendantType.ONE,
        auditContext,
        LockedInodePath::getParentInodeOrNull,
        (inodePath, permChecker) -> permChecker.checkParentPermission(Mode.Bits.WRITE, inodePath)
    );
    LockingScheme srcLockingScheme =
        createLockingScheme(srcPath, context.getOptions().getCommonOptions(),
            LockPattern.WRITE_EDGE);
    LockingScheme dstLockingScheme =
        createLockingScheme(dstPath, context.getOptions().getCommonOptions(),
            LockPattern.WRITE_EDGE);
    try (InodePathPair inodePathPair = mInodeTree
        .lockInodePathPair(srcLockingScheme.getPath(), srcLockingScheme.getPattern(),
            dstLockingScheme.getPath(), dstLockingScheme.getPattern())) {
      LockedInodePath srcInodePath = inodePathPair.getFirst();
      LockedInodePath dstInodePath = inodePathPair.getSecond();
      auditContext.setSrcInode(srcInodePath.getParentInodeOrNull());
      try {
        mPermissionChecker.checkParentPermission(Mode.Bits.WRITE, srcInodePath);
        mPermissionChecker.checkParentPermission(Mode.Bits.WRITE, dstInodePath);
      } catch (AccessControlException e) {
        auditContext.setAllowed(false);
        throw e;
      }
      mMountTable.checkUnderWritableMountPoint(srcPath);
      mMountTable.checkUnderWritableMountPoint(dstPath);
      renameInternal(rpcContext, srcInodePath, dstInodePath, context);
      auditContext.setSrcInode(srcInodePath.getInode()).setSucceeded(true);
      LOG.debug("Renamed {} to {}", srcPath, dstPath);
    }
  }
}

/**
 * @param path the path to check against the persistence blacklist
 * @return false if the path contains any blacklisted pattern, true otherwise
 */
private boolean shouldPersistPath(String path) {
  for (String pattern : mPersistBlacklist) {
    if (path.contains(pattern)) {
      LOG.debug("Not persisting path {} because it is in {}: {}", path,
          PropertyKey.Name.MASTER_PERSISTENCE_BLACKLIST, mPersistBlacklist);
      return false;
    }
  }
  return true;
}

/**
 * Renames a file to a destination.
 *
 * @param rpcContext the rpc context
 * @param srcInodePath the source path to rename
 * @param dstInodePath the destination path to rename the file to
 * @param context method options
 */
private void renameInternal(RpcContext rpcContext, LockedInodePath srcInodePath,
    LockedInodePath dstInodePath, RenameContext context) throws InvalidPathException,
    FileDoesNotExistException, FileAlreadyExistsException, IOException, AccessControlException {
  if (!srcInodePath.fullPathExists()) {
    throw new FileDoesNotExistException(
        ExceptionMessage.PATH_DOES_NOT_EXIST.getMessage(srcInodePath.getUri()));
  }
  Inode srcInode = srcInodePath.getInode();
  // Renaming path to itself is a no-op.
  if (srcInodePath.getUri().equals(dstInodePath.getUri())) {
    return;
  }
  // Renaming the root is not allowed.
  if (srcInodePath.getUri().isRoot()) {
    throw new InvalidPathException(ExceptionMessage.ROOT_CANNOT_BE_RENAMED.getMessage());
  }
  if (dstInodePath.getUri().isRoot()) {
    throw new InvalidPathException(ExceptionMessage.RENAME_CANNOT_BE_TO_ROOT.getMessage());
  }
  // Renaming across mount points is not allowed.
  String srcMount = mMountTable.getMountPoint(srcInodePath.getUri());
  String dstMount = mMountTable.getMountPoint(dstInodePath.getUri());
  if ((srcMount == null && dstMount != null) || (srcMount != null && dstMount == null) || (
      srcMount != null && dstMount != null && !srcMount.equals(dstMount))) {
    throw new InvalidPathException(ExceptionMessage.RENAME_CANNOT_BE_ACROSS_MOUNTS
        .getMessage(srcInodePath.getUri(), dstInodePath.getUri()));
  }
  // Renaming onto a mount point is not allowed.
  if (mMountTable.isMountPoint(dstInodePath.getUri())) {
    throw new InvalidPathException(
        ExceptionMessage.RENAME_CANNOT_BE_ONTO_MOUNT_POINT.getMessage(dstInodePath.getUri()));
  }
  // Renaming a path to one of its subpaths is not allowed. Check for that, by making sure
  // srcComponents isn't a prefix of dstComponents.
  if (PathUtils.hasPrefix(dstInodePath.getUri().getPath(), srcInodePath.getUri().getPath())) {
    throw new InvalidPathException(ExceptionMessage.RENAME_CANNOT_BE_TO_SUBDIRECTORY
        .getMessage(srcInodePath.getUri(), dstInodePath.getUri()));
  }
  // Get the inodes of the src and dst parents.
  Inode srcParentInode = srcInodePath.getParentInodeDirectory();
  if (!srcParentInode.isDirectory()) {
    throw new InvalidPathException(
        ExceptionMessage.PATH_MUST_HAVE_VALID_PARENT.getMessage(srcInodePath.getUri()));
  }
  Inode dstParentInode = dstInodePath.getParentInodeDirectory();
  if (!dstParentInode.isDirectory()) {
    throw new InvalidPathException(
        ExceptionMessage.PATH_MUST_HAVE_VALID_PARENT.getMessage(dstInodePath.getUri()));
  }
  // Make sure destination path does not exist
  if (dstInodePath.fullPathExists()) {
    throw new FileAlreadyExistsException(String
        .format("Cannot rename because destination already exists. src: %s dst: %s",
            srcInodePath.getUri(), dstInodePath.getUri()));
  }
  // Now we remove srcInode from its parent and insert it into dstPath's parent
  renameInternal(rpcContext, srcInodePath, dstInodePath, false, context);
  // Check options and determine if we should schedule async persist. This is helpful for compute
  // frameworks that use rename as a commit operation.
  if (context.getPersist() && srcInode.isFile() && !srcInode.isPersisted()
      && shouldPersistPath(dstInodePath.toString())) {
    LOG.debug("Schedule Async Persist on rename for File {}", srcInodePath);
    mInodeTree.updateInode(rpcContext, UpdateInodeEntry.newBuilder()
        .setId(srcInode.getId())
        .setPersistenceState(PersistenceState.TO_BE_PERSISTED.name())
        .build());
    long shouldPersistTime = srcInode.asFile().getShouldPersistTime();
    long persistenceWaitTime = shouldPersistTime == Constants.NO_AUTO_PERSIST
        ? 0 : getPersistenceWaitTime(shouldPersistTime);
    mPersistRequests.put(srcInode.getId(), new alluxio.time.ExponentialTimer(
        ServerConfiguration.getMs(PropertyKey.MASTER_PERSISTENCE_INITIAL_INTERVAL_MS),
        ServerConfiguration.getMs(PropertyKey.MASTER_PERSISTENCE_MAX_INTERVAL_MS),
        persistenceWaitTime,
        ServerConfiguration.getMs(PropertyKey.MASTER_PERSISTENCE_MAX_TOTAL_WAIT_TIME_MS)));
  }
  // If a directory is being renamed with persist on rename, attempt to persist children
  if (srcInode.isDirectory() && context.getPersist()
      && shouldPersistPath(dstInodePath.toString())) {
    LOG.debug("Schedule Async Persist on rename for Dir: {}", dstInodePath);
    try (LockedInodePathList descendants = mInodeTree.getDescendants(srcInodePath)) {
      for (LockedInodePath childPath : descendants) {
        Inode childInode = childPath.getInode();
        // TODO(apc999): Resolve the child path legitimately
        if (childInode.isFile() && !childInode.isPersisted() && shouldPersistPath(
            childPath.toString().substring(srcInodePath.toString().length()))) {
          LOG.debug("Schedule Async Persist on rename for Child File: {}", childPath);
          mInodeTree.updateInode(rpcContext, UpdateInodeEntry.newBuilder()
              .setId(childInode.getId())
              .setPersistenceState(PersistenceState.TO_BE_PERSISTED.name())
              .build());
          long shouldPersistTime = childInode.asFile().getShouldPersistTime();
          long persistenceWaitTime = shouldPersistTime == Constants.NO_AUTO_PERSIST
              ? 0 : getPersistenceWaitTime(shouldPersistTime);
          mPersistRequests.put(childInode.getId(), new alluxio.time.ExponentialTimer(
              ServerConfiguration.getMs(PropertyKey.MASTER_PERSISTENCE_INITIAL_INTERVAL_MS),
              ServerConfiguration.getMs(PropertyKey.MASTER_PERSISTENCE_MAX_INTERVAL_MS),
              persistenceWaitTime,
              ServerConfiguration.getMs(PropertyKey.MASTER_PERSISTENCE_MAX_TOTAL_WAIT_TIME_MS)));
        }
      }
    }
  }
}

/**
 * Implements renaming.
 * @param rpcContext the rpc context
 * @param srcInodePath the path of the rename source
 * @param dstInodePath the path to the rename destination
 * @param replayed whether the operation is a result of replaying the journal
 * @param context method options
 */
private void renameInternal(RpcContext rpcContext, LockedInodePath srcInodePath,
    LockedInodePath dstInodePath, boolean replayed, RenameContext context)
    throws FileDoesNotExistException, InvalidPathException, IOException, AccessControlException {
  // Rename logic:
  // 1. Change the source inode name to the destination name.
  // 2. Insert the source inode into the destination parent.
  // 3. Do UFS operations if necessary.
  // 4. Remove the source inode (reverting the name) from the source parent.
  // 5. Set the last modification times for both source and destination parent inodes.
  Inode srcInode = srcInodePath.getInode();
  AlluxioURI srcPath = srcInodePath.getUri();
  AlluxioURI dstPath = dstInodePath.getUri();
  InodeDirectory srcParentInode = srcInodePath.getParentInodeDirectory();
  InodeDirectory dstParentInode = dstInodePath.getParentInodeDirectory();
  String srcName = srcPath.getName();
  String dstName = dstPath.getName();
  LOG.debug("Renaming {} to {}", srcPath, dstPath);
  if (dstInodePath.fullPathExists()) {
    throw new InvalidPathException("Destination path: " + dstPath + " already exists.");
  }
  mInodeTree.rename(rpcContext, RenameEntry.newBuilder()
      .setId(srcInode.getId())
      .setOpTimeMs(context.getOperationTimeMs())
      .setNewParentId(dstParentInode.getId())
      .setNewName(dstName)
      .setPath(srcPath.getPath())
      .setNewPath(dstPath.getPath())
      .build());
  // 3. Do UFS operations if necessary.
  // If the source file is persisted, rename it in the UFS.
  try {
    if (!replayed && srcInode.isPersisted()) {
      // Check if ufs is writable
      checkUfsMode(srcPath, OperationType.WRITE);
      checkUfsMode(dstPath, OperationType.WRITE);
      MountTable.Resolution resolution = mMountTable.resolve(srcPath);
      // Persist ancestor directories from top to the bottom. We cannot use recursive create
      // parents here because the permission for the ancestors can be different.
      // inodes from the same mount point as the dst
      Stack<InodeDirectory> sameMountDirs = new Stack<>();
      List<Inode> dstInodeList = dstInodePath.getInodeList();
      for (int i = dstInodeList.size() - 1; i >= 0; i--) {
        // Since dstInodePath is guaranteed not to be a full path, all inodes in the incomplete
        // path are guaranteed to be a directory.
        InodeDirectory dir = dstInodeList.get(i).asDirectory();
        sameMountDirs.push(dir);
        if (dir.isMountPoint()) {
          break;
        }
      }
      while (!sameMountDirs.empty()) {
        InodeDirectory dir = sameMountDirs.pop();
        if (!dir.isPersisted()) {
          mInodeTree.syncPersistExistingDirectory(rpcContext, dir);
        }
      }
      String ufsSrcPath = resolution.getUri().toString();
      try (CloseableResource<UnderFileSystem> ufsResource = resolution.acquireUfsResource()) {
        UnderFileSystem ufs = ufsResource.get();
        String ufsDstUri = mMountTable.resolve(dstPath).getUri().toString();
        boolean success;
        if (srcInode.isFile()) {
          success = ufs.renameRenamableFile(ufsSrcPath, ufsDstUri);
        } else {
          success = ufs.renameRenamableDirectory(ufsSrcPath, ufsDstUri);
        }
        if (!success) {
          throw new IOException(
              ExceptionMessage.FAILED_UFS_RENAME.getMessage(ufsSrcPath, ufsDstUri));
        }
      }
      // The destination was persisted in ufs.
      mUfsAbsentPathCache.processExisting(dstPath);
    }
  } catch (Throwable t) {
    // On failure, revert changes and throw exception.
    // The reverse rename restores the original name and parent under the source path.
    mInodeTree.rename(rpcContext, RenameEntry.newBuilder()
        .setId(srcInode.getId())
        .setOpTimeMs(context.getOperationTimeMs())
        .setNewName(srcName)
        .setNewParentId(srcParentInode.getId())
        .setPath(dstPath.getPath())
        .setNewPath(srcPath.getPath())
        .build());
    throw t;
  }
  Metrics.PATHS_RENAMED.inc();
}

/**
 * Propagates the persisted status to all parents of the given inode in the same mount partition.
* * @param journalContext the journal context * @param inodePath the inode to start the propagation at * @return list of inodes which were marked as persisted */ private void propagatePersistedInternal(Supplier<JournalContext> journalContext, LockedInodePath inodePath) throws FileDoesNotExistException { Inode inode = inodePath.getInode(); List<Inode> inodes = inodePath.getInodeList(); // Traverse the inodes from target inode to the root. Collections.reverse(inodes); // Skip the first, to not examine the target inode itself. inodes = inodes.subList(1, inodes.size()); List<Inode> persistedInodes = new ArrayList<>(); for (Inode ancestor : inodes) { // the path is already locked. AlluxioURI path = mInodeTree.getPath(ancestor); if (mMountTable.isMountPoint(path)) { // Stop propagating the persisted status at mount points. break; } if (ancestor.isPersisted()) { // Stop if a persisted directory is encountered. break; } mInodeTree.updateInode(journalContext, UpdateInodeEntry.newBuilder() .setId(ancestor.getId()) .setPersistenceState(PersistenceState.PERSISTED.name()) .build()); } } @Override public void free(AlluxioURI path, FreeContext context) throws FileDoesNotExistException, InvalidPathException, AccessControlException, UnexpectedAlluxioException, IOException { Metrics.FREE_FILE_OPS.inc(); // No need to syncMetadata before free. try (RpcContext rpcContext = createRpcContext(context); LockedInodePath inodePath = mInodeTree.lockFullInodePath(path, LockPattern.WRITE_INODE); FileSystemMasterAuditContext auditContext = createAuditContext("free", path, null, inodePath.getInodeOrNull())) { try { mPermissionChecker.checkPermission(Mode.Bits.READ, inodePath); } catch (AccessControlException e) { auditContext.setAllowed(false); throw e; } freeInternal(rpcContext, inodePath, context); auditContext.setSucceeded(true); } } /** * Implements free operation. 
* * @param rpcContext the rpc context * @param inodePath inode of the path to free * @param context context to free method */ private void freeInternal(RpcContext rpcContext, LockedInodePath inodePath, FreeContext context) throws FileDoesNotExistException, UnexpectedAlluxioException, IOException, InvalidPathException, AccessControlException { Inode inode = inodePath.getInode(); if (inode.isDirectory() && !context.getOptions().getRecursive() && mInodeStore.hasChildren(inode.asDirectory())) { // inode is nonempty, and we don't free a nonempty directory unless recursive is true throw new UnexpectedAlluxioException( ExceptionMessage.CANNOT_FREE_NON_EMPTY_DIR.getMessage(mInodeTree.getPath(inode))); } long opTimeMs = System.currentTimeMillis(); List<Inode> freeInodes = new ArrayList<>(); freeInodes.add(inode); try (LockedInodePathList descendants = mInodeTree.getDescendants(inodePath)) { for (LockedInodePath descedant : Iterables.concat(descendants, Collections.singleton(inodePath))) { Inode freeInode = descedant.getInodeOrNull(); if (freeInode != null && freeInode.isFile()) { if (freeInode.getPersistenceState() != PersistenceState.PERSISTED) { throw new UnexpectedAlluxioException(ExceptionMessage.CANNOT_FREE_NON_PERSISTED_FILE .getMessage(mInodeTree.getPath(freeInode))); } if (freeInode.isPinned()) { if (!context.getOptions().getForced()) { throw new UnexpectedAlluxioException(ExceptionMessage.CANNOT_FREE_PINNED_FILE .getMessage(mInodeTree.getPath(freeInode))); } SetAttributeContext setAttributeContext = SetAttributeContext .mergeFrom(SetAttributePOptions.newBuilder().setRecursive(false).setPinned(false)); setAttributeSingleFile(rpcContext, descedant, true, opTimeMs, setAttributeContext); } // Remove corresponding blocks from workers. 
mBlockMaster.removeBlocks(freeInode.asFile().getBlockIds(), false /* delete */); } } } Metrics.FILES_FREED.inc(freeInodes.size()); } @Override public AlluxioURI getPath(long fileId) throws FileDoesNotExistException { try (LockedInodePath inodePath = mInodeTree.lockFullInodePath(fileId, LockPattern.READ)) { // the path is already locked. return mInodeTree.getPath(inodePath.getInode()); } } @Override public Set<Long> getPinIdList() { // return both the explicitly pinned inodes and not persisted inodes which should not be evicted return Sets.union(mInodeTree.getPinIdSet(), mInodeTree.getToBePersistedIds()); } @Override public String getUfsAddress() { return ServerConfiguration.get(PropertyKey.MASTER_MOUNT_TABLE_ROOT_UFS); } @Override public UfsInfo getUfsInfo(long mountId) { MountInfo info = mMountTable.getMountInfo(mountId); if (info == null) { return new UfsInfo(); } MountPOptions options = info.getOptions(); return new UfsInfo().setUri(info.getUfsUri()) .setMountOptions(MountContext .mergeFrom(MountPOptions.newBuilder().putAllProperties(options.getPropertiesMap()) .setReadOnly(options.getReadOnly()).setShared(options.getShared())) .getOptions().build()); } @Override public List<String> getWhiteList() { return mWhitelist.getList(); } @Override public List<Long> getLostFiles() { Set<Long> lostFiles = new HashSet<>(); for (long blockId : mBlockMaster.getLostBlocks()) { // the file id is the container id of the block id long containerId = BlockId.getContainerId(blockId); long fileId = IdUtils.createFileId(containerId); lostFiles.add(fileId); } return new ArrayList<>(lostFiles); } /** * Loads metadata for the path if it is (non-existing || load direct children is set). * * See {@link #shouldLoadMetadataIfNotExists(LockedInodePath, LoadMetadataContext)}. 
* * @param rpcContext the rpc context * @param path the path to load metadata for * @param context the {@link LoadMetadataContext} * @param isGetFileInfo whether this is loading for a {@link #getFileInfo} call */ private void loadMetadataIfNotExist(RpcContext rpcContext, AlluxioURI path, LoadMetadataContext context, boolean isGetFileInfo) throws InvalidPathException, AccessControlException { DescendantType syncDescendantType = GrpcUtils.fromProto(context.getOptions().getLoadDescendantType()); FileSystemMasterCommonPOptions commonOptions = context.getOptions().getCommonOptions(); // load metadata only and force sync InodeSyncStream sync = new InodeSyncStream(new LockingScheme(path, LockPattern.READ, false), this, rpcContext, syncDescendantType, commonOptions, isGetFileInfo, true, true); if (!sync.sync()) { LOG.debug("Failed to load metadata for path from UFS: {}", path); } } boolean shouldLoadMetadataIfNotExists(LockedInodePath inodePath, LoadMetadataContext context) { boolean inodeExists = inodePath.fullPathExists(); boolean loadDirectChildren = false; if (inodeExists) { try { Inode inode = inodePath.getInode(); loadDirectChildren = inode.isDirectory() && (context.getOptions().getLoadDescendantType() != LoadDescendantPType.NONE); } catch (FileDoesNotExistException e) { // This should never happen. 
        // fullPathExists() was true above, so the inode must be present.
        throw new RuntimeException(e);
      }
    }
    return !inodeExists || loadDirectChildren;
  }

  /**
   * Validates that the UFS path can be mounted and adjusts the mount options accordingly.
   *
   * @param ufsPath the UFS path being mounted
   * @param mountId the id of the mount whose UFS client is used for validation
   * @param context the mount context (its options may be updated, e.g. forced read-only)
   */
  private void prepareForMount(AlluxioURI ufsPath, long mountId, MountContext context)
      throws IOException {
    MountPOptions.Builder mountOption = context.getOptions();
    try (CloseableResource<UnderFileSystem> ufsResource =
        mUfsManager.get(mountId).acquireUfsResource()) {
      UnderFileSystem ufs = ufsResource.get();
      // Check that the ufsPath exists and is a directory
      if (!ufs.isDirectory(ufsPath.toString())) {
        throw new IOException(
            ExceptionMessage.UFS_PATH_DOES_NOT_EXIST.getMessage(ufsPath.toString()));
      }
      if (UnderFileSystemUtils.isWeb(ufs)) {
        // Web UFSes are never writable through Alluxio.
        mountOption.setReadOnly(true);
      }
    }
  }

  /**
   * Replaces an existing mount's UFS client and options with new ones under a new mount id.
   *
   * @param journalContext supplier of the journal context
   * @param inodePath the Alluxio mount point being updated
   * @param ufsPath the UFS endpoint of the mount
   * @param mountInfo the existing mount info
   * @param context the mount context carrying the new options
   */
  private void updateMountInternal(Supplier<JournalContext> journalContext,
      LockedInodePath inodePath, AlluxioURI ufsPath, MountInfo mountInfo, MountContext context)
      throws FileAlreadyExistsException, InvalidPathException, IOException {
    long newMountId = IdUtils.createMountId();
    // lock sync manager to ensure no sync point is added before the mount point is removed
    try (LockResource r = new LockResource(mSyncManager.getLock())) {
      List<AlluxioURI> syncPoints = mSyncManager.getFilterList(mountInfo.getMountId());
      if (syncPoints != null && !syncPoints.isEmpty()) {
        throw new InvalidArgumentException("Updating a mount point with ActiveSync enabled is not"
            + " supported. Please remove all sync'ed paths from the mount point and try again.");
      }

      AlluxioURI alluxioPath = inodePath.getUri();
      // validate new UFS client before updating the mount table
      mUfsManager.addMount(newMountId, new AlluxioURI(ufsPath.toString()),
          UnderFileSystemConfiguration.defaults(ServerConfiguration.global())
              .setReadOnly(context.getOptions().getReadOnly())
              .setShared(context.getOptions().getShared())
              .createMountSpecificConf(context.getOptions().getPropertiesMap()));
      prepareForMount(ufsPath, newMountId, context);
      // old ufsClient is removed as part of the mount table update process
      mMountTable.update(journalContext, alluxioPath, newMountId, context.getOptions().build());
    } catch (FileAlreadyExistsException | InvalidPathException | IOException e) {
      // revert everything
      mUfsManager.removeMount(newMountId);
      throw e;
    }
  }

  @Override
  public void updateMount(AlluxioURI alluxioPath, MountContext context)
      throws FileAlreadyExistsException, FileDoesNotExistException, InvalidPathException,
      IOException, AccessControlException {
    LockingScheme lockingScheme = createLockingScheme(alluxioPath,
        context.getOptions().getCommonOptions(), LockPattern.WRITE_EDGE);
    try (RpcContext rpcContext = createRpcContext(context);
        LockedInodePath inodePath = mInodeTree
            .lockInodePath(lockingScheme.getPath(), lockingScheme.getPattern());
        FileSystemMasterAuditContext auditContext = createAuditContext(
            "updateMount", alluxioPath, null, inodePath.getParentInodeOrNull())) {
      try {
        mPermissionChecker.checkParentPermission(Mode.Bits.WRITE, inodePath);
      } catch (AccessControlException e) {
        auditContext.setAllowed(false);
        throw e;
      }
      MountInfo mountInfo = mMountTable.getMountTable().get(alluxioPath.getPath());
      if (mountInfo == null) {
        throw new InvalidPathException("Failed to update mount properties for "
            + inodePath.getUri() + ". Please ensure the path is an existing mount point.");
      }
      updateMountInternal(rpcContext, inodePath, mountInfo.getUfsUri(), mountInfo, context);
      auditContext.setSucceeded(true);
    }
  }

  @Override
  public void mount(AlluxioURI alluxioPath, AlluxioURI ufsPath, MountContext context)
      throws FileAlreadyExistsException, FileDoesNotExistException, InvalidPathException,
      IOException, AccessControlException {
    Metrics.MOUNT_OPS.inc();
    try (RpcContext rpcContext = createRpcContext(context);
        FileSystemMasterAuditContext auditContext =
            createAuditContext("mount", alluxioPath, null, null)) {
      ufsPath = new AlluxioURI(PathUtils.normalizePath(ufsPath.toString(), AlluxioURI.SEPARATOR));
      syncMetadata(rpcContext,
          alluxioPath,
          context.getOptions().getCommonOptions(),
          DescendantType.ONE,
          auditContext,
          LockedInodePath::getParentInodeOrNull,
          (inodePath, permChecker) -> permChecker.checkParentPermission(Mode.Bits.WRITE, inodePath)
      );
      LockingScheme lockingScheme = createLockingScheme(alluxioPath,
          context.getOptions().getCommonOptions(), LockPattern.WRITE_EDGE);
      try (LockedInodePath inodePath = mInodeTree.lockInodePath(lockingScheme)) {
        auditContext.setSrcInode(inodePath.getParentInodeOrNull());
        try {
          mPermissionChecker.checkParentPermission(Mode.Bits.WRITE, inodePath);
        } catch (AccessControlException e) {
          auditContext.setAllowed(false);
          throw e;
        }
        mMountTable.checkUnderWritableMountPoint(alluxioPath);
        mountInternal(rpcContext, inodePath, ufsPath, context);
        auditContext.setSucceeded(true);
        Metrics.PATHS_MOUNTED.inc();
      }
    }
  }

  /**
   * Mounts a UFS path onto an Alluxio path.
   *
   * @param rpcContext the rpc context
   * @param inodePath the Alluxio path to mount to
   * @param ufsPath the UFS path to mount
   * @param context the mount context
   */
  private void mountInternal(RpcContext rpcContext, LockedInodePath inodePath, AlluxioURI ufsPath,
      MountContext context)
      throws InvalidPathException, FileAlreadyExistsException, FileDoesNotExistException,
      IOException, AccessControlException {
    // Check that the Alluxio Path does not exist
    if (inodePath.fullPathExists()) {
      // TODO(calvin): Add a test to validate this (ALLUXIO-1831)
      throw new InvalidPathException(
          ExceptionMessage.MOUNT_POINT_ALREADY_EXISTS.getMessage(inodePath.getUri()));
    }
    long mountId = IdUtils.createMountId();
    // Register the mount in the mount table first, then materialize the directory.
    mountInternal(rpcContext, inodePath, ufsPath, mountId, context);
    boolean loadMetadataSucceeded = false;
    try {
      // This will create the directory at alluxioPath
      InodeSyncStream.loadDirectoryMetadata(rpcContext,
          inodePath,
          LoadMetadataContext.mergeFrom(
              LoadMetadataPOptions.newBuilder().setCreateAncestors(false)),
          mMountTable,
          this);
      loadMetadataSucceeded = true;
    } finally {
      if (!loadMetadataSucceeded) {
        // Roll back the mount table entry if the directory could not be materialized.
        mMountTable.delete(rpcContext, inodePath.getUri(), true);
      }
    }
  }

  /**
   * Updates the mount table with the specified mount point. The mount options may be updated during
   * this method.
   *
   * @param journalContext the journal context
   * @param inodePath the Alluxio mount point
   * @param ufsPath the UFS endpoint to mount
   * @param mountId the mount id
   * @param context the mount context (may be updated)
   */
  private void mountInternal(Supplier<JournalContext> journalContext, LockedInodePath inodePath,
      AlluxioURI ufsPath, long mountId, MountContext context)
      throws FileAlreadyExistsException, InvalidPathException, IOException {
    AlluxioURI alluxioPath = inodePath.getUri();
    // Adding the mount point will not create the UFS instance and thus not connect to UFS
    mUfsManager.addMount(mountId, new AlluxioURI(ufsPath.toString()),
        UnderFileSystemConfiguration.defaults(ServerConfiguration.global())
            .setReadOnly(context.getOptions().getReadOnly())
            .setShared(context.getOptions().getShared())
            .createMountSpecificConf(context.getOptions().getPropertiesMap()));
    try {
      prepareForMount(ufsPath, mountId, context);
      // Check that the alluxioPath we're creating doesn't shadow a path in the parent UFS
      MountTable.Resolution resolution = mMountTable.resolve(alluxioPath);
      try (CloseableResource<UnderFileSystem> ufsResource =
          resolution.acquireUfsResource()) {
        String ufsResolvedPath = resolution.getUri().getPath();
        if (ufsResource.get().exists(ufsResolvedPath)) {
          throw new IOException(
              ExceptionMessage.MOUNT_PATH_SHADOWS_PARENT_UFS.getMessage(alluxioPath,
                  ufsResolvedPath));
        }
      }
      // Add the mount point. This will only succeed if we are not mounting a prefix of an existing
      // mount.
      mMountTable.add(journalContext, alluxioPath, ufsPath, mountId, context.getOptions().build());
    } catch (Exception e) {
      // Undo the UFS registration on any failure so no orphan client is left behind.
      mUfsManager.removeMount(mountId);
      throw e;
    }
  }

  @Override
  public void unmount(AlluxioURI alluxioPath) throws FileDoesNotExistException,
      InvalidPathException, IOException, AccessControlException {
    Metrics.UNMOUNT_OPS.inc();
    // Unmount should lock the parent to remove the child inode.
    try (RpcContext rpcContext = createRpcContext();
        LockedInodePath inodePath = mInodeTree
            .lockInodePath(alluxioPath, LockPattern.WRITE_EDGE);
        FileSystemMasterAuditContext auditContext =
            createAuditContext("unmount", alluxioPath, null, inodePath.getInodeOrNull())) {
      try {
        mPermissionChecker.checkParentPermission(Mode.Bits.WRITE, inodePath);
      } catch (AccessControlException e) {
        auditContext.setAllowed(false);
        throw e;
      }
      unmountInternal(rpcContext, inodePath);
      auditContext.setSucceeded(true);
      Metrics.PATHS_UNMOUNTED.inc();
    }
  }

  /**
   * Unmounts a UFS path previously mounted onto an Alluxio path.
   *
   * This method does not delete blocks. Instead, it adds them to the passed-in block deletion
   * context so that the blocks can be deleted after the inode deletion journal entry has been
   * written. We cannot delete blocks earlier because the inode deletion may fail, leaving us with
   * inode containing deleted blocks.
   *
   * @param rpcContext the rpc context
   * @param inodePath the Alluxio path to unmount, must be a mount point
   */
  private void unmountInternal(RpcContext rpcContext, LockedInodePath inodePath)
      throws InvalidPathException, FileDoesNotExistException, IOException {
    if (!inodePath.fullPathExists()) {
      throw new FileDoesNotExistException(
          "Failed to unmount: Path " + inodePath.getUri() + " does not exist");
    }
    MountInfo mountInfo = mMountTable.getMountTable().get(inodePath.getUri().getPath());
    if (mountInfo == null) {
      throw new InvalidPathException("Failed to unmount " + inodePath.getUri() + ". Please ensure"
          + " the path is an existing mount point.");
    }
    // Stop active sync before removing the mount table entry.
    mSyncManager.stopSyncForMount(mountInfo.getMountId());

    if (!mMountTable.delete(rpcContext, inodePath.getUri(), true)) {
      throw new InvalidPathException("Failed to unmount " + inodePath.getUri() + ". Please ensure"
          + " the path is an existing mount point and not root.");
    }
    try {
      // Use the internal delete API, setting {@code alluxioOnly} to true to prevent the delete
      // operations from being persisted in the UFS.
      deleteInternal(rpcContext, inodePath, DeleteContext
          .mergeFrom(DeletePOptions.newBuilder().setRecursive(true).setAlluxioOnly(true)));
    } catch (DirectoryNotEmptyException e) {
      throw new RuntimeException(String.format(
          "We should never see this exception because %s should never be thrown when recursive "
              + "is true.",
          e.getClass()));
    }
  }

  @Override
  public void setAcl(AlluxioURI path, SetAclAction action, List<AclEntry> entries,
      SetAclContext context)
      throws FileDoesNotExistException, AccessControlException, InvalidPathException,
      IOException {
    Metrics.SET_ACL_OPS.inc();
    try (RpcContext rpcContext = createRpcContext(context);
        FileSystemMasterAuditContext auditContext =
            createAuditContext("setAcl", path, null, null)) {
      syncMetadata(rpcContext,
          path,
          context.getOptions().getCommonOptions(),
          context.getOptions().getRecursive() ? DescendantType.ALL : DescendantType.NONE,
          auditContext,
          LockedInodePath::getInodeOrNull,
          (inodePath, permChecker) ->
              permChecker.checkSetAttributePermission(inodePath, false, true, false)
      );
      LockingScheme lockingScheme = createLockingScheme(path,
          context.getOptions().getCommonOptions(), LockPattern.WRITE_INODE);
      try (LockedInodePath inodePath = mInodeTree.lockInodePath(lockingScheme)) {
        mPermissionChecker.checkSetAttributePermission(inodePath, false, true, false);
        if (context.getOptions().getRecursive()) {
          try (LockedInodePathList descendants = mInodeTree.getDescendants(inodePath)) {
            for (LockedInodePath child : descendants) {
              mPermissionChecker.checkSetAttributePermission(child, false, true, false);
            }
          } catch (AccessControlException e) {
            auditContext.setAllowed(false);
            throw e;
          }
        }
        if (!inodePath.fullPathExists()) {
          throw new FileDoesNotExistException(ExceptionMessage
              .PATH_DOES_NOT_EXIST.getMessage(path));
        }
        setAclInternal(rpcContext, action, inodePath, entries, context);
        auditContext.setSucceeded(true);
      }
    }
  }

  /**
   * Validates the requested ACL change and applies it (recursively if requested).
   */
  private void setAclInternal(RpcContext rpcContext, SetAclAction action,
      LockedInodePath inodePath, List<AclEntry> entries, SetAclContext
      context) throws IOException, FileDoesNotExistException {
    Preconditions.checkState(inodePath.getLockPattern().isWrite());

    long opTimeMs = System.currentTimeMillis();
    // Check inputs for setAcl
    switch (action) {
      case REPLACE:
        Set<AclEntryType> types =
            entries.stream().map(AclEntry::getType).collect(Collectors.toSet());
        Set<AclEntryType> requiredTypes =
            Sets.newHashSet(AclEntryType.OWNING_USER, AclEntryType.OWNING_GROUP,
                AclEntryType.OTHER);
        requiredTypes.removeAll(types);

        // make sure the required entries are present
        if (!requiredTypes.isEmpty()) {
          throw new IOException(ExceptionMessage.ACL_BASE_REQUIRED.getMessage(
              String.join(", ", requiredTypes.stream().map(AclEntryType::toString).collect(
                  Collectors.toList()))));
        }
        break;
      case MODIFY: // fall through
      case REMOVE:
        if (entries.isEmpty()) {
          // Nothing to do.
          return;
        }
        break;
      case REMOVE_ALL:
        break;
      case REMOVE_DEFAULT:
        break;
      default:
    }
    setAclRecursive(rpcContext, action, inodePath, entries, false, opTimeMs, context);
  }

  /**
   * Pushes the inode's current ACL entries down to the UFS file (no-op for object stores).
   */
  private void setUfsAcl(LockedInodePath inodePath)
      throws InvalidPathException, AccessControlException {
    Inode inode = inodePath.getInodeOrNull();

    checkUfsMode(inodePath.getUri(), OperationType.WRITE);
    MountTable.Resolution resolution = mMountTable.resolve(inodePath.getUri());
    String ufsUri = resolution.getUri().toString();
    try (CloseableResource<UnderFileSystem> ufsResource = resolution.acquireUfsResource()) {
      UnderFileSystem ufs = ufsResource.get();
      if (ufs.isObjectStorage()) {
        LOG.warn("SetACL is not supported to object storage UFS via Alluxio. "
            + "UFS: " + ufsUri + ". This has no effect on the underlying object.");
      } else {
        try {
          List<AclEntry> entries = new ArrayList<>(inode.getACL().getEntries());
          if (inode.isDirectory()) {
            entries.addAll(inode.asDirectory().getDefaultACL().getEntries());
          }
          ufs.setAclEntries(ufsUri, entries);
        } catch (IOException e) {
          // NOTE(review): the IOException cause is dropped here; only the message survives.
          throw new AccessControlException("Could not setAcl for UFS file: " + ufsUri);
        }
      }
    }
  }

  /**
   * Applies the ACL change to a single inode and, when persisted, mirrors it to the UFS.
   */
  private void setAclSingleInode(RpcContext rpcContext, SetAclAction action,
      LockedInodePath inodePath, List<AclEntry> entries, boolean replay, long opTimeMs)
      throws IOException, FileDoesNotExistException {
    Preconditions.checkState(inodePath.getLockPattern().isWrite());

    Inode inode = inodePath.getInode();

    // Check that we are not removing an extended mask.
    if (action == SetAclAction.REMOVE) {
      for (AclEntry entry : entries) {
        if ((entry.isDefault() && inode.getDefaultACL().hasExtended())
            || (!entry.isDefault() && inode.getACL().hasExtended())) {
          if (entry.getType() == AclEntryType.MASK) {
            throw new InvalidArgumentException(
                "Deleting the mask for an extended ACL is not allowed. entry: " + entry);
          }
        }
      }
    }

    // Check that we are not setting default ACL to a file
    if (inode.isFile()) {
      for (AclEntry entry : entries) {
        if (entry.isDefault()) {
          throw new UnsupportedOperationException("Can not set default ACL for a file");
        }
      }
    }

    mInodeTree.setAcl(rpcContext, SetAclEntry.newBuilder()
        .setId(inode.getId())
        .setOpTimeMs(opTimeMs)
        .setAction(ProtoUtils.toProto(action))
        .addAllEntries(entries.stream().map(ProtoUtils::toProto).collect(Collectors.toList()))
        .build());

    try {
      if (!replay && inode.isPersisted()) {
        setUfsAcl(inodePath);
      }
    } catch (InvalidPathException | AccessControlException e) {
      // Best-effort UFS mirroring: the Alluxio-side ACL change has already been journaled.
      LOG.warn("Setting ufs ACL failed for path: {}", inodePath.getUri(), e);
      // TODO(david): revert the acl and default acl to the initial state if writing to ufs failed.
    }
  }

  /**
   * Applies the ACL change to the inode and, when requested, to all of its descendants.
   */
  private void setAclRecursive(RpcContext rpcContext, SetAclAction action,
      LockedInodePath inodePath, List<AclEntry> entries, boolean replay, long opTimeMs,
      SetAclContext context) throws IOException, FileDoesNotExistException {
    Preconditions.checkState(inodePath.getLockPattern().isWrite());
    setAclSingleInode(rpcContext, action, inodePath, entries, replay, opTimeMs);
    if (context.getOptions().getRecursive()) {
      try (LockedInodePathList descendants = mInodeTree.getDescendants(inodePath)) {
        for (LockedInodePath childPath : descendants) {
          rpcContext.throwIfCancelled();
          setAclSingleInode(rpcContext, action, childPath, entries, replay, opTimeMs);
        }
      }
    }
  }

  @Override
  public void setAttribute(AlluxioURI path, SetAttributeContext context)
      throws FileDoesNotExistException, AccessControlException, InvalidPathException,
      IOException {
    SetAttributePOptions.Builder options = context.getOptions();
    Metrics.SET_ATTRIBUTE_OPS.inc();
    // for chown
    boolean rootRequired = options.hasOwner();
    // for chgrp, chmod
    boolean ownerRequired = (options.hasGroup()) || (options.hasMode());
    // for other attributes
    boolean writeRequired = !rootRequired && !ownerRequired;
    if (options.hasOwner() && options.hasGroup()) {
      try {
        checkUserBelongsToGroup(options.getOwner(), options.getGroup());
      } catch (IOException e) {
        throw new IOException(String.format("Could not update owner:group for %s to %s:%s. %s",
            path.toString(), options.getOwner(), options.getGroup(), e.toString()), e);
      }
    }
    String commandName;
    boolean checkWritableMountPoint = false;
    if (options.hasOwner()) {
      commandName = "chown";
      checkWritableMountPoint = true;
    } else if (options.hasGroup()) {
      commandName = "chgrp";
      checkWritableMountPoint = true;
    } else if (options.hasMode()) {
      commandName = "chmod";
      checkWritableMountPoint = true;
    } else {
      commandName = "setAttribute";
    }
    try (RpcContext rpcContext = createRpcContext(context);
        FileSystemMasterAuditContext auditContext =
            createAuditContext(commandName, path, null, null)) {

      // Force recursive sync metadata if it is a pinning and unpinning operation
      boolean recursiveSync = options.hasPinned() || options.getRecursive();

      syncMetadata(rpcContext,
          path,
          context.getOptions().getCommonOptions(),
          recursiveSync ? DescendantType.ALL : DescendantType.ONE,
          auditContext,
          LockedInodePath::getInodeOrNull,
          (inodePath, permChecker) -> permChecker.checkSetAttributePermission(
              inodePath, rootRequired, ownerRequired, writeRequired)
      );
      LockingScheme lockingScheme = createLockingScheme(path, options.getCommonOptions(),
          LockPattern.WRITE_INODE);
      try (LockedInodePath inodePath = mInodeTree.lockInodePath(lockingScheme)) {
        auditContext.setSrcInode(inodePath.getInodeOrNull());
        if (checkWritableMountPoint) {
          mMountTable.checkUnderWritableMountPoint(path);
        }
        if (!inodePath.fullPathExists()) {
          throw new FileDoesNotExistException(ExceptionMessage
              .PATH_DOES_NOT_EXIST.getMessage(path));
        }
        try {
          mPermissionChecker
              .checkSetAttributePermission(inodePath, rootRequired, ownerRequired, writeRequired);
          if (context.getOptions().getRecursive()) {
            try (LockedInodePathList descendants = mInodeTree.getDescendants(inodePath)) {
              for (LockedInodePath childPath : descendants) {
                mPermissionChecker.checkSetAttributePermission(childPath, rootRequired,
                    ownerRequired, writeRequired);
              }
            }
          }
        } catch (AccessControlException e) {
          auditContext.setAllowed(false);
          throw e;
        }
        setAttributeInternal(rpcContext,
inodePath, context); auditContext.setSucceeded(true); } } } /** * Checks whether the owner belongs to the group. * * @param owner the owner to check * @param group the group to check * @throws FailedPreconditionException if owner does not belong to group */ private void checkUserBelongsToGroup(String owner, String group) throws IOException { List<String> groups = CommonUtils.getGroups(owner, ServerConfiguration.global()); if (groups == null || !groups.contains(group)) { throw new FailedPreconditionException("Owner " + owner + " does not belong to the group " + group); } } /** * Sets the file attribute. * * @param rpcContext the rpc context * @param inodePath the {@link LockedInodePath} to set attribute for * @param context attributes to be set, see {@link SetAttributePOptions} */ private void setAttributeInternal(RpcContext rpcContext, LockedInodePath inodePath, SetAttributeContext context) throws InvalidPathException, FileDoesNotExistException, AccessControlException, IOException { Inode targetInode = inodePath.getInode(); long opTimeMs = System.currentTimeMillis(); if (context.getOptions().getRecursive() && targetInode.isDirectory()) { try (LockedInodePathList descendants = mInodeTree.getDescendants(inodePath)) { for (LockedInodePath childPath : descendants) { rpcContext.throwIfCancelled(); setAttributeSingleFile(rpcContext, childPath, true, opTimeMs, context); } } } setAttributeSingleFile(rpcContext, inodePath, true, opTimeMs, context); } @Override public void scheduleAsyncPersistence(AlluxioURI path, ScheduleAsyncPersistenceContext context) throws AlluxioException, UnavailableException { try (RpcContext rpcContext = createRpcContext(context); LockedInodePath inodePath = mInodeTree.lockFullInodePath(path, LockPattern.WRITE_INODE)) { scheduleAsyncPersistenceInternal(inodePath, context, rpcContext); } } private void scheduleAsyncPersistenceInternal(LockedInodePath inodePath, ScheduleAsyncPersistenceContext context, RpcContext rpcContext) throws 
InvalidPathException, FileDoesNotExistException { InodeFile inode = inodePath.getInodeFile(); if (!inode.isCompleted()) { throw new InvalidPathException( "Cannot persist an incomplete Alluxio file: " + inodePath.getUri()); } if (shouldPersistPath(inodePath.toString())) { mInodeTree.updateInode(rpcContext, UpdateInodeEntry.newBuilder().setId(inode.getId()) .setPersistenceState(PersistenceState.TO_BE_PERSISTED.name()).build()); mPersistRequests.put(inode.getId(), new alluxio.time.ExponentialTimer( ServerConfiguration.getMs(PropertyKey.MASTER_PERSISTENCE_INITIAL_INTERVAL_MS), ServerConfiguration.getMs(PropertyKey.MASTER_PERSISTENCE_MAX_INTERVAL_MS), context.getPersistenceWaitTime(), ServerConfiguration.getMs(PropertyKey.MASTER_PERSISTENCE_MAX_TOTAL_WAIT_TIME_MS))); } } /** * Actively sync metadata, based on a list of changed files. * * @param path the path to sync * @param changedFiles collection of files that are changed under the path to sync, if this is * null, force sync the entire directory * @param executorService executor to execute the parallel incremental sync */ public void activeSyncMetadata(AlluxioURI path, Collection<AlluxioURI> changedFiles, ExecutorService executorService) throws IOException { if (changedFiles == null) { LOG.info("Start an active full sync of {}", path.toString()); } else { LOG.info("Start an active incremental sync of {} files", changedFiles.size()); } long start = System.currentTimeMillis(); if (changedFiles != null && changedFiles.isEmpty()) { return; } try (RpcContext rpcContext = createRpcContext()) { if (changedFiles == null) { // full sync // Set sync interval to 0 to force a sync. 
FileSystemMasterCommonPOptions options = FileSystemMasterCommonPOptions.newBuilder().setSyncIntervalMs(0).build(); LockingScheme scheme = createSyncLockingScheme(path, options, false); InodeSyncStream sync = new InodeSyncStream(scheme, this, rpcContext, DescendantType.ALL, options, false, false, false); if (!sync.sync()) { LOG.debug("Active full sync on {} didn't sync any paths.", path); } long end = System.currentTimeMillis(); LOG.info("Ended an active full sync of {} in {}ms", path.toString(), end - start); return; } else { // incremental sync Set<Callable<Void>> callables = new HashSet<>(); for (AlluxioURI changedFile : changedFiles) { callables.add(() -> { // Set sync interval to 0 to force a sync. FileSystemMasterCommonPOptions options = FileSystemMasterCommonPOptions.newBuilder().setSyncIntervalMs(0).build(); LockingScheme scheme = createSyncLockingScheme(changedFile, options, false); InodeSyncStream sync = new InodeSyncStream(scheme, this, rpcContext, DescendantType.ONE, options, false, false, false); if (!sync.sync()) { // Use debug because this can be a noisy log LOG.debug("Incremental sync on {} didn't sync any paths.", path); } return null; }); } executorService.invokeAll(callables); } } catch (InterruptedException e) { LOG.warn("InterruptedException during active sync: {}", e.toString()); Thread.currentThread().interrupt(); return; } catch (InvalidPathException | AccessControlException e) { LogUtils.warnWithException(LOG, "Failed to active sync on path {}", path, e); } if (changedFiles != null) { long end = System.currentTimeMillis(); LOG.info("Ended an active incremental sync of {} files in {}ms", changedFiles.size(), end - start); } } @Override public boolean recordActiveSyncTxid(long txId, long mountId) { MountInfo mountInfo = mMountTable.getMountInfo(mountId); if (mountInfo == null) { return false; } AlluxioURI mountPath = mountInfo.getAlluxioUri(); try (RpcContext rpcContext = createRpcContext(); LockedInodePath inodePath = mInodeTree 
.lockFullInodePath(mountPath, LockPattern.READ)) { File.ActiveSyncTxIdEntry txIdEntry = File.ActiveSyncTxIdEntry.newBuilder().setTxId(txId).setMountId(mountId).build(); rpcContext.journal(JournalEntry.newBuilder().setActiveSyncTxId(txIdEntry).build()); } catch (UnavailableException | InvalidPathException | FileDoesNotExistException e) { LOG.warn("Exception when recording activesync txid, path {}, exception {}", mountPath, e); return false; } return true; } private boolean syncMetadata(RpcContext rpcContext, AlluxioURI path, FileSystemMasterCommonPOptions options, DescendantType syncDescendantType, @Nullable FileSystemMasterAuditContext auditContext, @Nullable Function<LockedInodePath, Inode> auditContextSrcInodeFunc, @Nullable PermissionCheckFunction permissionCheckOperation) throws AccessControlException, InvalidPathException { return syncMetadata(rpcContext, path, options, syncDescendantType, auditContext, auditContextSrcInodeFunc, permissionCheckOperation, false); } /** * Sync metadata for an Alluxio path with the UFS. * * @param rpcContext the current RPC context * @param path the path to sync * @param options options included with the RPC * @param syncDescendantType how deep the sync should be performed * @param auditContextSrcInodeFunc the src inode for the audit context, if null, no source inode * is set on the audit context * @param permissionCheckOperation a consumer that accepts a locked inode path and a * {@link PermissionChecker}. The consumer is expected to call one * of the permission checkers functions with the given inode path. 
* If null, no permission checking is performed
   * @param isGetFileInfo true if syncing for a getFileInfo operation
   * @return true if the sync was performed and synced the path; false if the locking scheme
   *         decided no sync was needed, or the sync stream did not sync any path
   */
  private boolean syncMetadata(RpcContext rpcContext, AlluxioURI path,
      FileSystemMasterCommonPOptions options, DescendantType syncDescendantType,
      @Nullable FileSystemMasterAuditContext auditContext,
      @Nullable Function<LockedInodePath, Inode> auditContextSrcInodeFunc,
      @Nullable PermissionCheckFunction permissionCheckOperation, boolean isGetFileInfo)
      throws AccessControlException, InvalidPathException {
    LockingScheme syncScheme = createSyncLockingScheme(path, options, isGetFileInfo);
    // The locking scheme decides (from the sync interval in the options and the sync path
    // cache) whether this path needs to be synced at all; bail out early when it does not.
    if (!syncScheme.shouldSync()) {
      return false;
    }
    InodeSyncStream sync = new InodeSyncStream(syncScheme, this, rpcContext, syncDescendantType,
        options, auditContext, auditContextSrcInodeFunc, permissionCheckOperation, isGetFileInfo,
        false, false);
    return sync.sync();
  }

  @FunctionalInterface
  interface PermissionCheckFunction {

    /**
     * Performs this operation on the given arguments.
     *
     * @param l the first input argument
     * @param c the second input argument
     */
    void accept(LockedInodePath l, PermissionChecker c) throws AccessControlException,
        InvalidPathException;
  }

  /** @return the read-only view of the inode store. */
  ReadOnlyInodeStore getInodeStore() {
    return mInodeStore;
  }

  /** @return the inode tree. */
  InodeTree getInodeTree() {
    return mInodeTree;
  }

  /** @return the inode lock manager. */
  InodeLockManager getInodeLockManager() {
    return mInodeLockManager;
  }

  /** @return the mount table. */
  MountTable getMountTable() {
    return mMountTable;
  }

  /** @return the UFS sync path cache. */
  UfsSyncPathCache getSyncPathCache() {
    return mUfsSyncPathCache;
  }

  /** @return the permission checker. */
  PermissionChecker getPermissionChecker() {
    return mPermissionChecker;
  }

  @Override
  public FileSystemCommand workerHeartbeat(long workerId, List<Long> persistedFiles,
      WorkerHeartbeatContext context) throws IOException {

    List<String> persistedUfsFingerprints = context.getOptions().getPersistedFileFingerprintsList();
    // Fingerprints are only usable when the worker reported exactly one per persisted file;
    // otherwise fall back to the invalid-fingerprint sentinel for every file.
    boolean hasPersistedFingerprints = persistedUfsFingerprints.size() == persistedFiles.size();
    for (int i = 0; i < persistedFiles.size(); i++) {
      long fileId = persistedFiles.get(i);
      String ufsFingerprint =
          hasPersistedFingerprints ? persistedUfsFingerprints.get(i) :
              Constants.INVALID_UFS_FINGERPRINT;
      try {
        // Permission checking for each file is performed inside setAttribute
        setAttribute(getPath(fileId), SetAttributeContext
            .mergeFrom(SetAttributePOptions.newBuilder().setPersisted(true))
            .setUfsFingerprint(ufsFingerprint));
      } catch (FileDoesNotExistException | AccessControlException | InvalidPathException e) {
        LOG.error("Failed to set file {} as persisted, because {}", fileId, e);
      }
    }

    // TODO(zac) Clean up master and worker code since this is taken care of by job service now.
    // Worker should not persist any files. Instead, files are persisted through job service.
    List<PersistFile> filesToPersist = new ArrayList<>();
    FileSystemCommandOptions commandOptions = new FileSystemCommandOptions();
    commandOptions.setPersistOptions(new PersistCommandOptions(filesToPersist));
    return new FileSystemCommand(CommandType.Persist, commandOptions);
  }

  /**
   * Sets attributes on the inode at the given locked path and journals the update.
   *
   * @param rpcContext the RPC context to journal through
   * @param inodePath the {@link LockedInodePath} to use
   * @param updateUfs whether to update the UFS with the attribute change
   * @param opTimeMs the operation time (in milliseconds)
   * @param context the method context
   */
  protected void setAttributeSingleFile(RpcContext rpcContext, LockedInodePath inodePath,
      boolean updateUfs, long opTimeMs, SetAttributeContext context)
      throws FileDoesNotExistException, InvalidPathException, AccessControlException {
    Inode inode = inodePath.getInode();
    SetAttributePOptions.Builder protoOptions = context.getOptions();
    if (protoOptions.hasPinned()) {
      mInodeTree.setPinned(rpcContext, inodePath, context.getOptions().getPinned(),
          context.getOptions().getPinnedMediaList(), opTimeMs);
    }
    UpdateInodeEntry.Builder entry = UpdateInodeEntry.newBuilder().setId(inode.getId());
    if (protoOptions.hasReplicationMax() || protoOptions.hasReplicationMin()) {
      Integer replicationMax = protoOptions.hasReplicationMax()
          ? protoOptions.getReplicationMax() : null;
      Integer replicationMin = protoOptions.hasReplicationMin() ?
protoOptions.getReplicationMin() : null; mInodeTree.setReplication(rpcContext, inodePath, replicationMax, replicationMin, opTimeMs); } // protoOptions may not have both fields set if (protoOptions.hasCommonOptions()) { FileSystemMasterCommonPOptions commonOpts = protoOptions.getCommonOptions(); TtlAction action = commonOpts.hasTtlAction() ? commonOpts.getTtlAction() : null; Long ttl = commonOpts.hasTtl() ? commonOpts.getTtl() : null; boolean modified = false; if (ttl != null && inode.getTtl() != ttl) { entry.setTtl(ttl); modified = true; } if (action != null && inode.getTtlAction() != action) { entry.setTtlAction(ProtobufUtils.toProtobuf(action)); modified = true; } if (modified) { entry.setLastModificationTimeMs(opTimeMs); } } if (protoOptions.hasPersisted()) { Preconditions.checkArgument(inode.isFile(), PreconditionMessage.PERSIST_ONLY_FOR_FILE); Preconditions.checkArgument(inode.asFile().isCompleted(), PreconditionMessage.FILE_TO_PERSIST_MUST_BE_COMPLETE); // TODO(manugoyal) figure out valid behavior in the un-persist case Preconditions .checkArgument(protoOptions.getPersisted(), PreconditionMessage.ERR_SET_STATE_UNPERSIST); if (!inode.asFile().isPersisted()) { entry.setPersistenceState(PersistenceState.PERSISTED.name()); entry.setLastModificationTimeMs(context.getOperationTimeMs()); propagatePersistedInternal(rpcContext, inodePath); Metrics.FILES_PERSISTED.inc(); } } boolean ownerGroupChanged = (protoOptions.hasOwner()) || (protoOptions.hasGroup()); boolean modeChanged = protoOptions.hasMode(); // If the file is persisted in UFS, also update corresponding owner/group/permission. 
if ((ownerGroupChanged || modeChanged) && updateUfs && inode.isPersisted()) { if ((inode instanceof InodeFile) && !inode.asFile().isCompleted()) { LOG.debug("Alluxio does not propagate chown/chgrp/chmod to UFS for incomplete files."); } else { checkUfsMode(inodePath.getUri(), OperationType.WRITE); MountTable.Resolution resolution = mMountTable.resolve(inodePath.getUri()); String ufsUri = resolution.getUri().toString(); try (CloseableResource<UnderFileSystem> ufsResource = resolution.acquireUfsResource()) { UnderFileSystem ufs = ufsResource.get(); if (ufs.isObjectStorage()) { LOG.debug("setOwner/setMode is not supported to object storage UFS via Alluxio. " + "UFS: " + ufsUri + ". This has no effect on the underlying object."); } else { String owner = null; String group = null; String mode = null; if (ownerGroupChanged) { try { owner = protoOptions.getOwner() != null ? protoOptions.getOwner() : inode.getOwner(); group = protoOptions.getGroup() != null ? protoOptions.getGroup() : inode.getGroup(); ufs.setOwner(ufsUri, owner, group); } catch (IOException e) { throw new AccessControlException("Could not setOwner for UFS file " + ufsUri + " . Aborting the setAttribute operation in Alluxio.", e); } } if (modeChanged) { try { mode = String.valueOf(protoOptions.getMode()); ufs.setMode(ufsUri, ModeUtils.protoToShort(protoOptions.getMode())); } catch (IOException e) { throw new AccessControlException("Could not setMode for UFS file " + ufsUri + " . Aborting the setAttribute operation in Alluxio.", e); } } // Retrieve the ufs fingerprint after the ufs changes. 
String existingFingerprint = inode.getUfsFingerprint(); if (!existingFingerprint.equals(Constants.INVALID_UFS_FINGERPRINT)) { // Update existing fingerprint, since contents did not change Fingerprint fp = Fingerprint.parse(existingFingerprint); fp.putTag(Fingerprint.Tag.OWNER, owner); fp.putTag(Fingerprint.Tag.GROUP, group); fp.putTag(Fingerprint.Tag.MODE, mode); context.setUfsFingerprint(fp.serialize()); } else { // Need to retrieve the fingerprint from ufs. context.setUfsFingerprint(ufs.getFingerprint(ufsUri)); } } } } } if (!context.getUfsFingerprint().equals(Constants.INVALID_UFS_FINGERPRINT)) { entry.setUfsFingerprint(context.getUfsFingerprint()); } // Only commit the set permission to inode after the propagation to UFS succeeded. if (protoOptions.hasOwner()) { entry.setOwner(protoOptions.getOwner()); } if (protoOptions.hasGroup()) { entry.setGroup(protoOptions.getGroup()); } if (modeChanged) { entry.setMode(ModeUtils.protoToShort(protoOptions.getMode())); } mInodeTree.updateInode(rpcContext, entry.build()); } @Override public List<SyncPointInfo> getSyncPathList() { return mSyncManager.getSyncPathList(); } @Override public void startSync(AlluxioURI syncPoint) throws IOException, InvalidPathException, AccessControlException, ConnectionFailedException { LockingScheme lockingScheme = new LockingScheme(syncPoint, LockPattern.WRITE_EDGE, true); try (RpcContext rpcContext = createRpcContext(); LockedInodePath inodePath = mInodeTree .lockInodePath(lockingScheme.getPath(), lockingScheme.getPattern()); FileSystemMasterAuditContext auditContext = createAuditContext("startSync", syncPoint, null, inodePath.getParentInodeOrNull())) { try { mPermissionChecker.checkParentPermission(Mode.Bits.WRITE, inodePath); } catch (AccessControlException e) { auditContext.setAllowed(false); throw e; } mSyncManager.startSyncAndJournal(rpcContext, syncPoint); auditContext.setSucceeded(true); } } @Override public void stopSync(AlluxioURI syncPoint) throws IOException, InvalidPathException, 
AccessControlException { try (RpcContext rpcContext = createRpcContext()) { boolean isSuperUser = true; try { mPermissionChecker.checkSuperUser(); } catch (AccessControlException e) { isSuperUser = false; } if (isSuperUser) { // TODO(AM): Remove once we don't require a write lock on the sync point during a full sync // Stop sync w/o acquiring an inode lock to terminate an initial full scan (if running) mSyncManager.stopSyncAndJournal(rpcContext, syncPoint); } LockingScheme lockingScheme = new LockingScheme(syncPoint, LockPattern.READ, false); try (LockedInodePath inodePath = mInodeTree.lockInodePath(lockingScheme.getPath(), lockingScheme.getPattern()); FileSystemMasterAuditContext auditContext = createAuditContext("stopSync", syncPoint, null, inodePath.getParentInodeOrNull())) { try { mPermissionChecker.checkParentPermission(Mode.Bits.WRITE, inodePath); } catch (AccessControlException e) { auditContext.setAllowed(false); throw e; } if (!isSuperUser) { // Stop sync here only if not terminated w/o holding the inode lock mSyncManager.stopSyncAndJournal(rpcContext, syncPoint); } auditContext.setSucceeded(true); } } } @Override public List<WorkerInfo> getWorkerInfoList() throws UnavailableException { return mBlockMaster.getWorkerInfoList(); } /** * @param fileId file ID * @param jobId persist job ID * @param persistenceWaitTime persistence initial wait time * @param uri Alluxio Uri of the file * @param tempUfsPath temp UFS path */ private void addPersistJob(long fileId, long jobId, long persistenceWaitTime, AlluxioURI uri, String tempUfsPath) { alluxio.time.ExponentialTimer timer = mPersistRequests.remove(fileId); if (timer == null) { timer = new alluxio.time.ExponentialTimer( ServerConfiguration.getMs(PropertyKey.MASTER_PERSISTENCE_INITIAL_INTERVAL_MS), ServerConfiguration.getMs(PropertyKey.MASTER_PERSISTENCE_MAX_INTERVAL_MS), persistenceWaitTime, ServerConfiguration.getMs(PropertyKey.MASTER_PERSISTENCE_MAX_TOTAL_WAIT_TIME_MS)); } mPersistJobs.put(fileId, new 
PersistJob(jobId, fileId, uri, tempUfsPath, timer)); } private long getPersistenceWaitTime(long shouldPersistTime) { long currentTime = System.currentTimeMillis(); if (shouldPersistTime >= currentTime) { return shouldPersistTime - currentTime; } else { return 0; } } /** * Periodically schedules jobs to persist files and updates metadata accordingly. */ @NotThreadSafe private final class PersistenceScheduler implements alluxio.heartbeat.HeartbeatExecutor { private static final long MAX_QUIET_PERIOD_SECONDS = 64; /** * Quiet period for job service flow control (in seconds). When job service refuses starting new * jobs, we use exponential backoff to alleviate the job service pressure. */ private long mQuietPeriodSeconds; /** * Creates a new instance of {@link PersistenceScheduler}. */ PersistenceScheduler() { mQuietPeriodSeconds = 0; } @Override public void close() {} // Nothing to clean up /** * Updates the file system metadata to reflect the fact that the persist file request expired. * * @param fileId the file ID */ private void handleExpired(long fileId) throws AlluxioException, UnavailableException { try (JournalContext journalContext = createJournalContext(); LockedInodePath inodePath = mInodeTree .lockFullInodePath(fileId, LockPattern.WRITE_INODE)) { InodeFile inode = inodePath.getInodeFile(); switch (inode.getPersistenceState()) { case LOST: // fall through case NOT_PERSISTED: // fall through case PERSISTED: LOG.warn("File {} (id={}) persistence state is {} and will not be changed.", inodePath.getUri(), fileId, inode.getPersistenceState()); return; case TO_BE_PERSISTED: mInodeTree.updateInode(journalContext, UpdateInodeEntry.newBuilder() .setId(inode.getId()) .setPersistenceState(PersistenceState.NOT_PERSISTED.name()) .build()); mInodeTree.updateInodeFile(journalContext, UpdateInodeFileEntry.newBuilder() .setId(inode.getId()) .setPersistJobId(Constants.PERSISTENCE_INVALID_JOB_ID) .setTempUfsPath(Constants.PERSISTENCE_INVALID_UFS_PATH) .build()); break; 
default: throw new IllegalStateException( "Unrecognized persistence state: " + inode.getPersistenceState()); } } } /** * Attempts to schedule a persist job and updates the file system metadata accordingly. * * @param fileId the file ID */ private void handleReady(long fileId) throws AlluxioException, IOException { alluxio.time.ExponentialTimer timer = mPersistRequests.get(fileId); // Lookup relevant file information. AlluxioURI uri; String tempUfsPath; try (LockedInodePath inodePath = mInodeTree.lockFullInodePath(fileId, LockPattern.READ)) { InodeFile inode = inodePath.getInodeFile(); uri = inodePath.getUri(); switch (inode.getPersistenceState()) { case LOST: // fall through case NOT_PERSISTED: // fall through case PERSISTED: LOG.warn("File {} (id={}) persistence state is {} and will not be changed.", inodePath.getUri(), fileId, inode.getPersistenceState()); return; case TO_BE_PERSISTED: tempUfsPath = inodePath.getInodeFile().getTempUfsPath(); break; default: throw new IllegalStateException( "Unrecognized persistence state: " + inode.getPersistenceState()); } } MountTable.Resolution resolution = mMountTable.resolve(uri); try (CloseableResource<UnderFileSystem> ufsResource = resolution.acquireUfsResource()) { // If previous persist job failed, clean up the temporary file. cleanup(ufsResource.get(), tempUfsPath); // Generate a temporary path to be used by the persist job. // If the persist destination is on object store, let persist job copy files to destination // directly if (ServerConfiguration.getBoolean(PropertyKey.MASTER_UNSAFE_DIRECT_PERSIST_OBJECT_ENABLED) && ufsResource.get().isObjectStorage()) { tempUfsPath = resolution.getUri().toString(); } else { tempUfsPath = PathUtils.temporaryFileName( System.currentTimeMillis(), resolution.getUri().toString()); } } PersistConfig config = new PersistConfig(uri.getPath(), resolution.getMountId(), false, tempUfsPath); // Schedule the persist job. 
long jobId; JobMasterClient client = mJobMasterClientPool.acquire(); try { jobId = client.run(config); } finally { mJobMasterClientPool.release(client); } mQuietPeriodSeconds /= 2; mPersistJobs.put(fileId, new PersistJob(jobId, fileId, uri, tempUfsPath, timer)); // Update the inode and journal the change. try (JournalContext journalContext = createJournalContext(); LockedInodePath inodePath = mInodeTree .lockFullInodePath(fileId, LockPattern.WRITE_INODE)) { InodeFile inode = inodePath.getInodeFile(); mInodeTree.updateInodeFile(journalContext, UpdateInodeFileEntry.newBuilder() .setId(inode.getId()) .setPersistJobId(jobId) .setTempUfsPath(tempUfsPath) .build()); } } /** * {@inheritDoc} * * The method iterates through the set of files to be persisted (identified by their ID) and * attempts to schedule a file persist job. Each iteration removes the file ID from the set * of files to be persisted unless the execution sets the {@code remove} flag to false. * * @throws InterruptedException if the thread is interrupted */ @Override public void heartbeat() throws InterruptedException { java.util.concurrent.TimeUnit.SECONDS.sleep(mQuietPeriodSeconds); // Process persist requests. for (long fileId : mPersistRequests.keySet()) { // Throw if interrupted. if (Thread.interrupted()) { throw new InterruptedException("PersistenceScheduler interrupted."); } boolean remove = true; alluxio.time.ExponentialTimer timer = mPersistRequests.get(fileId); if (timer == null) { // This could occur if a key is removed from mPersistRequests while we are iterating. 
continue; } alluxio.time.ExponentialTimer.Result timerResult = timer.tick(); if (timerResult == alluxio.time.ExponentialTimer.Result.NOT_READY) { // operation is not ready to be scheduled continue; } AlluxioURI uri = null; try { try (LockedInodePath inodePath = mInodeTree .lockFullInodePath(fileId, LockPattern.READ)) { uri = inodePath.getUri(); } try { checkUfsMode(uri, OperationType.WRITE); } catch (Exception e) { LOG.warn("Unable to schedule persist request for path {}: {}", uri, e.getMessage()); // Retry when ufs mode permits operation remove = false; continue; } switch (timerResult) { case EXPIRED: handleExpired(fileId); break; case READY: handleReady(fileId); break; default: throw new IllegalStateException("Unrecognized timer state: " + timerResult); } } catch (FileDoesNotExistException | InvalidPathException e) { LOG.warn("The file {} (id={}) to be persisted was not found : {}", uri, fileId, e.getMessage()); LOG.debug("Exception: ", e); } catch (UnavailableException e) { LOG.warn("Failed to persist file {}, will retry later: {}", uri, e.toString()); remove = false; } catch (ResourceExhaustedException e) { LOG.warn("The job service is busy, will retry later: {}", e.toString()); LOG.debug("Exception: ", e); mQuietPeriodSeconds = (mQuietPeriodSeconds == 0) ? 1 : Math.min(MAX_QUIET_PERIOD_SECONDS, mQuietPeriodSeconds * 2); remove = false; // End the method here until the next heartbeat. No more jobs should be scheduled during // the current heartbeat if the job master is at full capacity. return; } catch (Exception e) { LOG.warn("Unexpected exception encountered when scheduling the persist job for file {} " + "(id={}) : {}", uri, fileId, e.getMessage()); LOG.debug("Exception: ", e); } finally { if (remove) { mPersistRequests.remove(fileId); } } } } } /** * Periodically polls for the result of the jobs and updates metadata accordingly. 
*/ @NotThreadSafe private final class PersistenceChecker implements alluxio.heartbeat.HeartbeatExecutor { /** * Creates a new instance of {@link PersistenceChecker}. */ PersistenceChecker() {} @Override public void close() {} // nothing to clean up /** * Updates the file system metadata to reflect the fact that the persist job succeeded. * * NOTE: It is the responsibility of the caller to update {@link #mPersistJobs}. * * @param job the successful job */ private void handleSuccess(PersistJob job) { long fileId = job.getFileId(); String tempUfsPath = job.getTempUfsPath(); List<Long> blockIds = new ArrayList<>(); UfsManager.UfsClient ufsClient = null; try (JournalContext journalContext = createJournalContext(); LockedInodePath inodePath = mInodeTree .lockFullInodePath(fileId, LockPattern.WRITE_INODE)) { InodeFile inode = inodePath.getInodeFile(); MountTable.Resolution resolution = mMountTable.resolve(inodePath.getUri()); ufsClient = mUfsManager.get(resolution.getMountId()); switch (inode.getPersistenceState()) { case LOST: // fall through case NOT_PERSISTED: // fall through case PERSISTED: LOG.warn("File {} (id={}) persistence state is {}. Successful persist has no effect.", job.getUri(), fileId, inode.getPersistenceState()); break; case TO_BE_PERSISTED: UpdateInodeEntry.Builder builder = UpdateInodeEntry.newBuilder(); try (CloseableResource<UnderFileSystem> ufsResource = resolution.acquireUfsResource()) { UnderFileSystem ufs = ufsResource.get(); String ufsPath = resolution.getUri().toString(); ufs.setOwner(tempUfsPath, inode.getOwner(), inode.getGroup()); ufs.setMode(tempUfsPath, inode.getMode()); if (!ufsPath.equals(tempUfsPath)) { // Make rename only when tempUfsPath is different from final ufsPath. Note that, // on object store, we take the optimization to skip the rename by having // tempUfsPath the same as final ufsPath. 
if (!ufs.renameRenamableFile(tempUfsPath, ufsPath)) { throw new IOException( String.format("Failed to rename %s to %s.", tempUfsPath, ufsPath)); } } builder.setUfsFingerprint(ufs.getFingerprint(ufsPath)); } mInodeTree.updateInodeFile(journalContext, UpdateInodeFileEntry.newBuilder() .setId(inode.getId()) .setPersistJobId(Constants.PERSISTENCE_INVALID_JOB_ID) .setTempUfsPath(Constants.PERSISTENCE_INVALID_UFS_PATH) .build()); mInodeTree.updateInode(journalContext, builder .setId(inode.getId()) .setPersistenceState(PersistenceState.PERSISTED.name()) .build()); propagatePersistedInternal(journalContext, inodePath); Metrics.FILES_PERSISTED.inc(); // Save state for possible cleanup blockIds.addAll(inode.getBlockIds()); break; default: throw new IllegalStateException( "Unrecognized persistence state: " + inode.getPersistenceState()); } } catch (FileDoesNotExistException | InvalidPathException e) { LOG.warn("The file {} (id={}) to be persisted was not found: {}", job.getUri(), fileId, e.getMessage()); LOG.debug("Exception: ", e); // Cleanup the temporary file. if (ufsClient != null) { try (CloseableResource<UnderFileSystem> ufsResource = ufsClient.acquireUfsResource()) { cleanup(ufsResource.get(), tempUfsPath); } } } catch (Exception e) { LOG.warn( "Unexpected exception encountered when trying to complete persistence of a file {} " + "(id={}) : {}", job.getUri(), fileId, e.getMessage()); LOG.debug("Exception: ", e); if (ufsClient != null) { try (CloseableResource<UnderFileSystem> ufsResource = ufsClient.acquireUfsResource()) { cleanup(ufsResource.get(), tempUfsPath); } } mPersistRequests.put(fileId, job.getTimer()); } // Cleanup possible staging UFS blocks files due to fast durable write fallback. 
// Note that this is best effort if (ufsClient != null) { for (long blockId : blockIds) { String ufsBlockPath = alluxio.worker.BlockUtils.getUfsBlockPath(ufsClient, blockId); try (CloseableResource<UnderFileSystem> ufsResource = ufsClient.acquireUfsResource()) { alluxio.util.UnderFileSystemUtils.deleteFileIfExists(ufsResource.get(), ufsBlockPath); } catch (Exception e) { LOG.warn("Failed to clean up staging UFS block file {}: {}", ufsBlockPath, e.toString()); } } } } @Override public void heartbeat() throws InterruptedException { boolean queueEmpty = mPersistCheckerPool.getQueue().isEmpty(); // Check the progress of persist jobs. for (long fileId : mPersistJobs.keySet()) { // Throw if interrupted. if (Thread.interrupted()) { throw new InterruptedException("PersistenceChecker interrupted."); } final PersistJob job = mPersistJobs.get(fileId); if (job == null) { // This could happen if a key is removed from mPersistJobs while we are iterating. continue; } // Cancel any jobs marked as canceled switch (job.getCancelState()) { case NOT_CANCELED: break; case TO_BE_CANCELED: // Send the message to cancel this job JobMasterClient client = mJobMasterClientPool.acquire(); try { client.cancel(job.getId()); job.setCancelState(PersistJob.CancelState.CANCELING); } catch (alluxio.exception.status.NotFoundException e) { LOG.warn("Persist job (id={}) for file {} (id={}) to cancel was not found: {}", job.getId(), job.getUri(), fileId, e.getMessage()); LOG.debug("Exception: ", e); mPersistJobs.remove(fileId); continue; } catch (Exception e) { LOG.warn("Unexpected exception encountered when cancelling a persist job (id={}) for " + "file {} (id={}) : {}", job.getId(), job.getUri(), fileId, e.getMessage()); LOG.debug("Exception: ", e); } finally { mJobMasterClientPool.release(client); } continue; case CANCELING: break; default: throw new IllegalStateException("Unrecognized cancel state: " + job.getCancelState()); } if (!queueEmpty) { // There are tasks waiting in the queue, so do not try 
to schedule anything continue; } long jobId = job.getId(); JobMasterClient client = mJobMasterClientPool.acquire(); try { JobInfo jobInfo = client.getJobStatus(jobId); switch (jobInfo.getStatus()) { case RUNNING: // fall through case CREATED: break; case FAILED: LOG.warn("The persist job (id={}) for file {} (id={}) failed: {}", jobId, job.getUri(), fileId, jobInfo.getErrorMessage()); mPersistJobs.remove(fileId); mPersistRequests.put(fileId, job.getTimer()); break; case CANCELED: mPersistJobs.remove(fileId); break; case COMPLETED: mPersistJobs.remove(fileId); mPersistCheckerPool.execute(() -> handleSuccess(job)); break; default: throw new IllegalStateException("Unrecognized job status: " + jobInfo.getStatus()); } } catch (Exception e) { LOG.warn("Exception encountered when trying to retrieve the status of a " + " persist job (id={}) for file {} (id={}): {}.", jobId, job.getUri(), fileId, e.getMessage()); LOG.debug("Exception: ", e); mPersistJobs.remove(fileId); mPersistRequests.put(fileId, job.getTimer()); } finally { mJobMasterClientPool.release(client); } } } } @NotThreadSafe private final class TimeSeriesRecorder implements alluxio.heartbeat.HeartbeatExecutor { @Override public void heartbeat() throws InterruptedException { // TODO(calvin): Provide a better way to keep track of metrics collected as time series MetricRegistry registry = MetricsSystem.METRIC_REGISTRY; SortedMap<String, Gauge> gauges = registry.getGauges(); // % Alluxio space used Long masterCapacityTotal = (Long) gauges .get(MetricKey.CLUSTER_CAPACITY_TOTAL.getName()).getValue(); Long masterCapacityUsed = (Long) gauges .get(MetricKey.CLUSTER_CAPACITY_USED.getName()).getValue(); int percentAlluxioSpaceUsed = (masterCapacityTotal > 0) ? 
(int) (100L * masterCapacityUsed / masterCapacityTotal) : 0; mTimeSeriesStore.record("% Alluxio Space Used", percentAlluxioSpaceUsed); // % UFS space used Long masterUnderfsCapacityTotal = (Long) gauges .get(MetricKey.CLUSTER_ROOT_UFS_CAPACITY_TOTAL.getName()).getValue(); Long masterUnderfsCapacityUsed = (Long) gauges .get(MetricKey.CLUSTER_ROOT_UFS_CAPACITY_USED.getName()).getValue(); int percentUfsSpaceUsed = (masterUnderfsCapacityTotal > 0) ? (int) (100L * masterUnderfsCapacityUsed / masterUnderfsCapacityTotal) : 0; mTimeSeriesStore.record("% UFS Space Used", percentUfsSpaceUsed); // Bytes read Long bytesReadLocalThroughput = (Long) gauges.get( MetricKey.CLUSTER_BYTES_READ_LOCAL_THROUGHPUT.getName()).getValue(); Long bytesReadDomainSocketThroughput = (Long) gauges .get(MetricKey.CLUSTER_BYTES_READ_DOMAIN_THROUGHPUT.getName()).getValue(); Long bytesReadRemoteThroughput = (Long) gauges .get(MetricKey.CLUSTER_BYTES_READ_ALLUXIO_THROUGHPUT.getName()).getValue(); Long bytesReadUfsThroughput = (Long) gauges .get(MetricKey.CLUSTER_BYTES_READ_UFS_THROUGHPUT.getName()).getValue(); mTimeSeriesStore.record(MetricKey.CLUSTER_BYTES_READ_LOCAL_THROUGHPUT.getName(), bytesReadLocalThroughput); mTimeSeriesStore.record(MetricKey.CLUSTER_BYTES_READ_DOMAIN_THROUGHPUT.getName(), bytesReadDomainSocketThroughput); mTimeSeriesStore.record(MetricKey.CLUSTER_BYTES_READ_ALLUXIO_THROUGHPUT.getName(), bytesReadRemoteThroughput); mTimeSeriesStore.record(MetricKey.CLUSTER_BYTES_READ_UFS_THROUGHPUT.getName(), bytesReadUfsThroughput); // Bytes written Long bytesWrittenLocalThroughput = (Long) gauges .get(MetricKey.CLUSTER_BYTES_WRITTEN_LOCAL_THROUGHPUT.getName()) .getValue(); Long bytesWrittenAlluxioThroughput = (Long) gauges .get(MetricKey.CLUSTER_BYTES_WRITTEN_ALLUXIO_THROUGHPUT.getName()).getValue(); Long bytesWrittenDomainSocketThroughput = (Long) gauges.get( MetricKey.CLUSTER_BYTES_WRITTEN_DOMAIN_THROUGHPUT.getName()).getValue(); Long bytesWrittenUfsThroughput = (Long) gauges 
.get(MetricKey.CLUSTER_BYTES_WRITTEN_UFS_THROUGHPUT.getName()).getValue();
      mTimeSeriesStore.record(MetricKey.CLUSTER_BYTES_WRITTEN_LOCAL_THROUGHPUT.getName(),
          bytesWrittenLocalThroughput);
      mTimeSeriesStore.record(MetricKey.CLUSTER_BYTES_WRITTEN_ALLUXIO_THROUGHPUT.getName(),
          bytesWrittenAlluxioThroughput);
      mTimeSeriesStore.record(MetricKey.CLUSTER_BYTES_WRITTEN_DOMAIN_THROUGHPUT.getName(),
          bytesWrittenDomainSocketThroughput);
      mTimeSeriesStore.record(MetricKey.CLUSTER_BYTES_WRITTEN_UFS_THROUGHPUT.getName(),
          bytesWrittenUfsThroughput);
    }

    @Override
    public void close() {} // Nothing to clean up.
  }

  /**
   * Best-effort deletion of a file in the UFS: failures are logged as warnings, never thrown,
   * and an empty path is silently ignored.
   *
   * @param ufs the under file system to delete from
   * @param ufsPath the UFS path to delete; may be empty, in which case nothing happens
   */
  private static void cleanup(UnderFileSystem ufs, String ufsPath) {
    final String errMessage = "Failed to delete UFS file {}.";
    if (!ufsPath.isEmpty()) {
      try {
        if (!ufs.deleteExistingFile(ufsPath)) {
          LOG.warn(errMessage, ufsPath);
        }
      } catch (IOException e) {
        LOG.warn(errMessage, ufsPath, e);
      }
    }
  }

  @Override
  public void updateUfsMode(AlluxioURI ufsPath, UfsMode ufsMode)
      throws InvalidPathException, InvalidArgumentException, UnavailableException,
      AccessControlException {
    // TODO(adit): Create new fsadmin audit context
    try (RpcContext rpcContext = createRpcContext();
         FileSystemMasterAuditContext auditContext =
             createAuditContext("updateUfsMode", ufsPath, null, null)) {
      mUfsManager.setUfsMode(rpcContext, ufsPath, ufsMode);
      auditContext.setSucceeded(true);
    }
  }

  /**
   * Check if the specified operation type is allowed to the ufs.
   *
   * @param alluxioPath the Alluxio path
   * @param opType the operation type
   * @throws AccessControlException if the UFS mode forbids the operation (NO_ACCESS forbids
   *         everything; READ_ONLY forbids writes)
   */
  private void checkUfsMode(AlluxioURI alluxioPath, OperationType opType)
      throws AccessControlException, InvalidPathException {
    MountTable.Resolution resolution = mMountTable.resolve(alluxioPath);
    try (CloseableResource<UnderFileSystem> ufsResource = resolution.acquireUfsResource()) {
      UnderFileSystem ufs = ufsResource.get();
      UfsMode ufsMode =
          ufs.getOperationMode(mUfsManager.getPhysicalUfsState(ufs.getPhysicalStores()));
      switch (ufsMode) {
        case NO_ACCESS:
          throw new AccessControlException(ExceptionMessage.UFS_OP_NOT_ALLOWED.getMessage(opType,
              resolution.getUri(), UfsMode.NO_ACCESS));
        case READ_ONLY:
          if (opType == OperationType.WRITE) {
            throw new AccessControlException(ExceptionMessage.UFS_OP_NOT_ALLOWED.getMessage(opType,
                resolution.getUri(), UfsMode.READ_ONLY));
          }
          break;
        default:
          // All operations are allowed
          break;
      }
    }
  }

  /**
   * The operation type. This class is used to check if an operation to the under storage is allowed
   * during maintenance.
   */
  enum OperationType {
    READ,
    WRITE,
  }

  /**
   * Class that contains metrics for FileSystemMaster.
   * This class is public because the counter names are referenced in
 */
public static final class Metrics {
  // Counters of successfully completed namespace mutations/lookups.
  private static final Counter DIRECTORIES_CREATED
      = MetricsSystem.counter(MetricKey.MASTER_DIRECTORIES_CREATED.getName());
  private static final Counter FILE_BLOCK_INFOS_GOT
      = MetricsSystem.counter(MetricKey.MASTER_FILE_BLOCK_INFOS_GOT.getName());
  private static final Counter FILE_INFOS_GOT
      = MetricsSystem.counter(MetricKey.MASTER_FILE_INFOS_GOT.getName());
  private static final Counter FILES_COMPLETED
      = MetricsSystem.counter(MetricKey.MASTER_FILES_COMPLETED.getName());
  private static final Counter FILES_CREATED
      = MetricsSystem.counter(MetricKey.MASTER_FILES_CREATED.getName());
  private static final Counter FILES_FREED
      = MetricsSystem.counter(MetricKey.MASTER_FILES_FREED.getName());
  private static final Counter FILES_PERSISTED
      = MetricsSystem.counter(MetricKey.MASTER_FILES_PERSISTED.getName());
  private static final Counter NEW_BLOCKS_GOT
      = MetricsSystem.counter(MetricKey.MASTER_NEW_BLOCKS_GOT.getName());
  private static final Counter PATHS_DELETED
      = MetricsSystem.counter(MetricKey.MASTER_PATHS_DELETED.getName());
  private static final Counter PATHS_MOUNTED
      = MetricsSystem.counter(MetricKey.MASTER_PATHS_MOUNTED.getName());
  private static final Counter PATHS_RENAMED
      = MetricsSystem.counter(MetricKey.MASTER_PATHS_RENAMED.getName());
  private static final Counter PATHS_UNMOUNTED
      = MetricsSystem.counter(MetricKey.MASTER_PATHS_UNMOUNTED.getName());

  // Counters of RPC operations attempted (as opposed to the "completed" counters above).
  // TODO(peis): Increment the RPCs OPs at the place where we receive the RPCs.
  private static final Counter COMPLETE_FILE_OPS
      = MetricsSystem.counter(MetricKey.MASTER_COMPLETE_FILE_OPS.getName());
  private static final Counter CREATE_DIRECTORIES_OPS
      = MetricsSystem.counter(MetricKey.MASTER_CREATE_DIRECTORIES_OPS.getName());
  private static final Counter CREATE_FILES_OPS
      = MetricsSystem.counter(MetricKey.MASTER_CREATE_FILES_OPS.getName());
  private static final Counter DELETE_PATHS_OPS
      = MetricsSystem.counter(MetricKey.MASTER_DELETE_PATHS_OPS.getName());
  private static final Counter FREE_FILE_OPS
      = MetricsSystem.counter(MetricKey.MASTER_FREE_FILE_OPS.getName());
  private static final Counter GET_FILE_BLOCK_INFO_OPS
      = MetricsSystem.counter(MetricKey.MASTER_GET_FILE_BLOCK_INFO_OPS.getName());
  private static final Counter GET_FILE_INFO_OPS
      = MetricsSystem.counter(MetricKey.MASTER_GET_FILE_INFO_OPS.getName());
  private static final Counter GET_NEW_BLOCK_OPS
      = MetricsSystem.counter(MetricKey.MASTER_GET_NEW_BLOCK_OPS.getName());
  private static final Counter MOUNT_OPS
      = MetricsSystem.counter(MetricKey.MASTER_MOUNT_OPS.getName());
  private static final Counter RENAME_PATH_OPS
      = MetricsSystem.counter(MetricKey.MASTER_RENAME_PATH_OPS.getName());
  private static final Counter SET_ACL_OPS
      = MetricsSystem.counter(MetricKey.MASTER_SET_ACL_OPS.getName());
  private static final Counter SET_ATTRIBUTE_OPS
      = MetricsSystem.counter(MetricKey.MASTER_SET_ATTRIBUTE_OPS.getName());
  private static final Counter UNMOUNT_OPS
      = MetricsSystem.counter(MetricKey.MASTER_UNMOUNT_OPS.getName());

  // Lazily-populated, per-UFS-path map of counters for UFS operations that were saved
  // (i.e. not issued to the UFS); see getUfsCounter below.
  private static final Map<String, Map<UFSOps, Counter>> SAVED_UFS_OPS
      = new ConcurrentHashMap<>();

  /**
   * UFS operations enum.
   */
  public enum UFSOps {
    CREATE_FILE,
    GET_FILE_INFO,
    DELETE_FILE,
    LIST_STATUS
  }

  /**
   * Get operations saved per ufs counter.
* * @param ufsPath ufsPath * @param ufsOp ufs operation * @return the counter object */ @VisibleForTesting public static Counter getUfsCounter(String ufsPath, UFSOps ufsOp) { return SAVED_UFS_OPS.compute(ufsPath, (k, v) -> { if (v != null) { return v; } else { return new ConcurrentHashMap<>(); } }).compute(ufsOp, (k, v) -> { if (v != null) { return v; } else { return MetricsSystem.counter( Metric.getMetricNameWithTags(UFS_OP_SAVED_PREFIX + ufsOp.name(), MetricInfo.TAG_UFS, MetricsSystem.escape(new AlluxioURI(ufsPath)))); } }); } /** * Register some file system master related gauges. * * @param master the file system master * @param ufsManager the under filesystem manager */ @VisibleForTesting public static void registerGauges( final FileSystemMaster master, final UfsManager ufsManager) { MetricsSystem.registerGaugeIfAbsent(MetricKey.MASTER_FILES_PINNED.getName(), master::getNumberOfPinnedFiles); MetricsSystem.registerGaugeIfAbsent(MetricKey.MASTER_TOTAL_PATHS.getName(), () -> master.getInodeCount()); final String ufsDataFolder = ServerConfiguration.get(PropertyKey.MASTER_MOUNT_TABLE_ROOT_UFS); MetricsSystem.registerGaugeIfAbsent(MetricKey.CLUSTER_ROOT_UFS_CAPACITY_TOTAL.getName(), () -> { try (CloseableResource<UnderFileSystem> ufsResource = ufsManager.getRoot().acquireUfsResource()) { UnderFileSystem ufs = ufsResource.get(); return ufs.getSpace(ufsDataFolder, UnderFileSystem.SpaceType.SPACE_TOTAL); } catch (IOException e) { LOG.error(e.getMessage(), e); return Stream.empty(); } }); MetricsSystem.registerGaugeIfAbsent(MetricKey.CLUSTER_ROOT_UFS_CAPACITY_USED.getName(), () -> { try (CloseableResource<UnderFileSystem> ufsResource = ufsManager.getRoot().acquireUfsResource()) { UnderFileSystem ufs = ufsResource.get(); return ufs.getSpace(ufsDataFolder, UnderFileSystem.SpaceType.SPACE_USED); } catch (IOException e) { LOG.error(e.getMessage(), e); return Stream.empty(); } }); MetricsSystem.registerGaugeIfAbsent(MetricKey.CLUSTER_ROOT_UFS_CAPACITY_FREE.getName(), () -> { 
long ret = 0L;
    try (CloseableResource<UnderFileSystem> ufsResource =
        ufsManager.getRoot().acquireUfsResource()) {
      UnderFileSystem ufs = ufsResource.get();
      ret = ufs.getSpace(ufsDataFolder, UnderFileSystem.SpaceType.SPACE_FREE);
    } catch (IOException e) {
      // Keep the 0 default if the root UFS cannot be queried.
      LOG.error(e.getMessage(), e);
    }
    return ret;
  });
}

private Metrics() {} // prevent instantiation
}

/**
 * Creates a {@link FileSystemMasterAuditContext} instance.
 *
 * @param command the command to be logged by this {@link AuditContext}
 * @param srcPath the source path of this command
 * @param dstPath the destination path of this command
 * @param srcInode the source inode of this command
 * @return newly-created {@link FileSystemMasterAuditContext} instance
 */
private FileSystemMasterAuditContext createAuditContext(String command, AlluxioURI srcPath,
    @Nullable AlluxioURI dstPath, @Nullable Inode srcInode) {
  FileSystemMasterAuditContext auditContext =
      new FileSystemMasterAuditContext(mAsyncAuditLogWriter);
  // Only populate the context when async audit logging is enabled.
  if (mAsyncAuditLogWriter != null) {
    String user = null;
    String ugi = "";
    try {
      user = AuthenticatedClientUser.getClientUser(ServerConfiguration.global());
    } catch (AccessControlException e) {
      // No authenticated client user could be determined for this call.
      ugi = "N/A";
    }
    if (user != null) {
      try {
        String primaryGroup = CommonUtils.getPrimaryGroupName(user, ServerConfiguration.global());
        ugi = user + "," + primaryGroup;
      } catch (IOException e) {
        // Group lookup failure is non-fatal; audit with the user name only.
        LOG.debug("Failed to get primary group for user {}.", user);
        ugi = user + ",N/A";
      }
    }
    AuthType authType =
        ServerConfiguration.getEnum(PropertyKey.SECURITY_AUTHENTICATION_TYPE, AuthType.class);
    auditContext.setUgi(ugi)
        .setAuthType(authType)
        .setIp(ClientIpAddressInjector.getIpAddress())
        .setCommand(command).setSrcPath(srcPath).setDstPath(dstPath)
        .setSrcInode(srcInode).setAllowed(true);
  }
  return auditContext;
}

// On commit, removes the deleted blocks from the block master and invalidates their
// cached UFS block locations.
private BlockDeletionContext createBlockDeletionContext() {
  return new DefaultBlockDeletionContext(this::removeBlocks,
      blocks -> blocks.forEach(mUfsBlockLocationCache::invalidate));
}

private void removeBlocks(List<Long> blocks) throws
IOException {
  if (blocks.isEmpty()) {
    return;
  }
  // Bounded retries: the block master may report UnavailableException transiently.
  RetryPolicy retry = new CountingRetry(3);
  IOException lastThrown = null;
  while (retry.attempt()) {
    try {
      mBlockMaster.removeBlocks(blocks, true);
      return;
    } catch (UnavailableException e) {
      // Remember the most recent failure; a later attempt may still succeed.
      lastThrown = e;
    }
  }
  // All attempts failed; surface the last cause to the caller.
  throw new IOException("Failed to remove deleted blocks from block master", lastThrown);
}

/**
 * @return a context for executing an RPC
 */
@VisibleForTesting
public RpcContext createRpcContext() throws UnavailableException {
  return createRpcContext(new InternalOperationContext());
}

/**
 * @param operationContext the operation context
 * @return a context for executing an RPC
 */
@VisibleForTesting
public RpcContext createRpcContext(OperationContext operationContext)
    throws UnavailableException {
  // The RPC context bundles block deletion, journaling, and state-lock call tracking.
  return new RpcContext(createBlockDeletionContext(), createJournalContext(),
      operationContext.withTracker(mStateLockCallTracker));
}

// Builds a locking scheme with the caller-requested lock pattern; the final constructor
// argument (always false here) is the flag passed as isGetFileInfo in
// createSyncLockingScheme below.
private LockingScheme createLockingScheme(AlluxioURI path,
    FileSystemMasterCommonPOptions options, LockPattern desiredLockMode) {
  return new LockingScheme(path, desiredLockMode, options, mUfsSyncPathCache, false);
}

// Variant used on the metadata-sync path: always uses the READ lock pattern and
// forwards whether the caller is a getFileInfo operation.
private LockingScheme createSyncLockingScheme(AlluxioURI path,
    FileSystemMasterCommonPOptions options, boolean isGetFileInfo) {
  return new LockingScheme(path, LockPattern.READ, options, mUfsSyncPathCache, isGetFileInfo);
}

// Whether permission-based authorization is enabled in the server configuration.
boolean isAclEnabled() {
  return ServerConfiguration.getBoolean(PropertyKey.SECURITY_AUTHORIZATION_PERMISSION_ENABLED);
}

@Override
public List<TimeSeries> getTimeSeries() {
  return mTimeSeriesStore.getTimeSeries();
}

@Override
public AlluxioURI reverseResolve(AlluxioURI ufsUri) throws InvalidPathException {
  // Maps a UFS URI back to the Alluxio path it is mounted under; null means the URI
  // is not covered by any mount point.
  MountTable.ReverseResolution resolution = mMountTable.reverseResolve(ufsUri);
  if (resolution == null) {
    throw new InvalidPathException(ufsUri.toString() + " is not a valid ufs uri");
  }
  return resolution.getUri();
}

@Override
@Nullable
public String getRootInodeOwner() {
  return mInodeTree.getRootUserName();
}
}
apache-2.0
mbraak/jqTree
static/examples/load_json_data_from_server.js
188
// Register a catch-all mockjax handler: every ajax request made by the page is
// intercepted and answered with the static example data instead of a real server.
$.mockjax({
    url: "*",
    response: function(options) {
        // Hand back the canned tree data as the response body.
        this.responseText = ExampleData.exampleData;
    },
    responseTime: 0 // respond immediately, no simulated latency
});

// Initialize the tree widget once the DOM is ready. No data option is passed, so the
// widget presumably fetches its node data via ajax, which the mock above serves.
$(function() {
    $("#tree1").tree();
});
apache-2.0