| file_path (string, 11 to 219 chars) | num_changed_lines (int64, 0 to 58.4k) | code (string, 0 to 3.52M chars) | repo_name (25 distinct values) | commit_date (2017-01-24 to 2017-01-28) | sha (25 distinct values) |
|---|---|---|---|---|---|
	src/renderers/testing/ReactTestRendererFiber.js 
 | 295 
							 | 
	/**
 * Copyright 2013-present, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree. An additional grant
 * of patent rights can be found in the PATENTS file in the same directory.
 *
 * @providesModule ReactTestRendererFiber
 * @preventMunge
 * @flow
 */
'use strict';
var ReactFiberReconciler = require('ReactFiberReconciler');
var ReactGenericBatching = require('ReactGenericBatching');
var emptyObject = require('emptyObject');
import type { TestRendererOptions } from 'ReactTestMount';
type ReactTestRendererJSON = {|
  type : string,
  props : {[propName: string] : any },
  children : null | Array<ReactTestRendererNode>,
  $$typeof ?: Symbol, // Optional because we add it with defineProperty().
|};
type ReactTestRendererNode = ReactTestRendererJSON | string;
type Container = {|
  children : Array<Instance | TextInstance>,
  createNodeMock : Function,
  tag : 'CONTAINER',
|};
type Props = Object;
type Instance = {|
  type : string,
  props : Object,
  children : Array<Instance | TextInstance>,
  rootContainerInstance : Container,
  tag : 'INSTANCE',
|};
type TextInstance = {|
  text : string,
  tag : 'TEXT',
|};
const UPDATE_SIGNAL = {};
var TestRenderer = ReactFiberReconciler({
  getRootHostContext() {
    return emptyObject;
  },
  getChildHostContext() {
    return emptyObject;
  },
  prepareForCommit() : void {
    // noop
  },
  resetAfterCommit() : void {
    // noop
  },
  createInstance(
    type : string,
    props : Props,
    rootContainerInstance : Container,
    hostContext : Object,
    internalInstanceHandle : Object,
  ) : Instance {
    return {
      type,
      props,
      children: [],
      rootContainerInstance,
      tag: 'INSTANCE',
    };
  },
  appendInitialChild(parentInstance : Instance, child : Instance | TextInstance) : void {
    const index = parentInstance.children.indexOf(child);
    if (index !== -1) {
      parentInstance.children.splice(index, 1);
    }
    parentInstance.children.push(child);
  },
  finalizeInitialChildren(
    testElement : Instance,
    type : string,
    props : Props,
    rootContainerInstance : Container,
  ) : boolean {
    return false;
  },
  prepareUpdate(
    testElement : Instance,
    type : string,
    oldProps : Props,
    newProps : Props,
    rootContainerInstance : Container,
    hostContext : Object,
  ) : null | {} {
    return UPDATE_SIGNAL;
  },
  commitUpdate(
    instance : Instance,
    updatePayload : {},
    type : string,
    oldProps : Props,
    newProps : Props,
    internalInstanceHandle : Object,
  ) : void {
    instance.type = type;
    instance.props = newProps;
  },
  commitMount(
    instance : Instance,
    type : string,
    newProps : Props,
    internalInstanceHandle : Object
  ) : void {
    // noop
  },
  shouldSetTextContent(props : Props) : boolean {
    return false;
  },
  resetTextContent(testElement : Instance) : void {
    // noop
  },
  createTextInstance(
    text : string,
    rootContainerInstance : Container,
    hostContext : Object,
    internalInstanceHandle : Object
  ) : TextInstance {
    return {
      text,
      tag: 'TEXT',
    };
  },
  commitTextUpdate(textInstance : TextInstance, oldText : string, newText : string) : void {
    textInstance.text = newText;
  },
  appendChild(parentInstance : Instance | Container, child : Instance | TextInstance) : void {
    const index = parentInstance.children.indexOf(child);
    if (index !== -1) {
      parentInstance.children.splice(index, 1);
    }
    parentInstance.children.push(child);
  },
  insertBefore(
    parentInstance : Instance | Container,
    child : Instance | TextInstance,
    beforeChild : Instance | TextInstance
  ) : void {
    const index = parentInstance.children.indexOf(child);
    if (index !== -1) {
      parentInstance.children.splice(index, 1);
    }
    const beforeIndex = parentInstance.children.indexOf(beforeChild);
    parentInstance.children.splice(beforeIndex, 0, child);
  },
  removeChild(parentInstance : Instance | Container, child : Instance | TextInstance) : void {
    const index = parentInstance.children.indexOf(child);
    parentInstance.children.splice(index, 1);
  },
  scheduleAnimationCallback(fn : Function) : void {
    setTimeout(fn);
  },
  scheduleDeferredCallback(fn : Function) : void {
    setTimeout(fn, 0, {timeRemaining: Infinity});
  },
  useSyncScheduling: true,
  getPublicInstance(inst) {
    switch (inst.tag) {
      case 'INSTANCE':
        const createNodeMock = inst.rootContainerInstance.createNodeMock;
        return createNodeMock({
          type: inst.type,
          props: inst.props,
        });
      default:
        return inst;
    }
  },
});
var defaultTestOptions = {
  createNodeMock: function() {
    return null;
  },
};
function toJSON(inst : Instance | TextInstance) : ReactTestRendererNode {
  switch (inst.tag) {
    case 'TEXT':
      return inst.text;
    case 'INSTANCE':
      /* eslint-disable no-unused-vars */
      // We don't include the `children` prop in JSON.
      // Instead, we will include the actual rendered children.
      const {children, ...props} = inst.props;
      /* eslint-enable */
      let renderedChildren = null;
      if (inst.children && inst.children.length) {
        renderedChildren = inst.children.map(toJSON);
      }
      const json : ReactTestRendererJSON = {
        type: inst.type,
        props: props,
        children: renderedChildren,
      };
      Object.defineProperty(json, '$$typeof', {value: Symbol.for('react.test.json')});
      return json;
    default:
      throw new Error(`Unexpected node type in toJSON: ${inst.tag}`);
  }
}
var ReactTestFiberRenderer = {
  create(element : ReactElement<any>, options : TestRendererOptions) {
    var createNodeMock = defaultTestOptions.createNodeMock;
    if (options && typeof options.createNodeMock === 'function') {
      createNodeMock = options.createNodeMock;
    }
    var container = {
      children: [],
      createNodeMock,
      tag: 'CONTAINER',
    };
    var root = TestRenderer.createContainer(container);
    TestRenderer.updateContainer(element, root, null, null);
    return {
      toJSON() {
        if (root == null || container == null) {
          return null;
        }
        if (container.children.length === 0) {
          return null;
        }
        if (container.children.length === 1) {
          return toJSON(container.children[0]);
        }
        return container.children.map(toJSON);
      },
      update(newElement : ReactElement<any>) {
        if (root == null) {
          return;
        }
        TestRenderer.updateContainer(newElement, root, null, null);
      },
      unmount() {
        if (root == null) {
          return;
        }
        TestRenderer.updateContainer(null, root, null);
        container = null;
        root = null;
      },
      getInstance() {
        if (root == null) {
          return null;
        }
        return TestRenderer.getPublicRootInstance(root);
      },
    };
  },
  /* eslint-disable camelcase */
  unstable_batchedUpdates: ReactGenericBatching.batchedUpdates,
  /* eslint-enable camelcase */
};
module.exports = ReactTestFiberRenderer;
 
 | 
	facebook_react 
 | 
	2017-01-28 
 | 
	751d22117213ebf425e1a81cc7b2def10e67ae5f 
 | 
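As a quick orientation to the API this file implements, here is a minimal usage sketch, assuming the renderer is consumed through the public `react-test-renderer` package; the `Link` component and the node-mock shape are purely illustrative.

```js
const React = require('react');
const ReactTestRenderer = require('react-test-renderer');

// Illustrative component; any tree of host elements works.
function Link(props) {
  return React.createElement('a', {href: props.href}, props.children);
}

const renderer = ReactTestRenderer.create(
  React.createElement(Link, {href: 'https://example.com'}, 'React'),
  {
    // createNodeMock backs getPublicInstance() for host components,
    // so refs resolve to whatever this returns.
    createNodeMock: (element) => ({type: element.type, focused: false}),
  }
);

// toJSON() yields {type, props, children} trees; text nodes become strings.
console.log(renderer.toJSON());
renderer.update(React.createElement(Link, {href: 'https://example.com'}, 'React!'));
renderer.unmount();
```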
					
	scripts/release-manager/commands/npm-grant-access.js 
 | 36 
							 | 
	'use strict';
const npmUtils = require('./utils/npm');
const chalk = require('chalk');
module.exports = function(vorpal, app) {
  vorpal
    .command('npm-grant-access')
    .description('Grant access to somebody to publish React. Assumes you ran "npm-check-access" first.')
    .action(function(args) {
      return new Promise((resolve, reject) => {
        this.prompt({
          type: 'input',
          message: 'Who would you like to grant access to? ',
          name: 'username',
        }).then((answers) => {
          if (!answers.username) {
            return reject('ABORTING');
          }
          const packagesNeedingAccess = npmUtils.packagesNeedingAccess(app, answers.username);
          if (packagesNeedingAccess.length) {
            this.log(`${chalk.yellow('PENDING')} Granting access to ${packagesNeedingAccess}`);
            npmUtils.grantAccess(app, answers.username, packagesNeedingAccess);
            this.log(`${chalk.green('OK')} Access has been granted to ${answers.username}.`);
            resolve();
          } else {
            this.log(`${chalk.green('OK')} ${answers.username} already has access.`);
            resolve();
          }
        });
      });
    });
};
 
 | 
	facebook_react 
 | 
	2017-01-28 
 | 
	751d22117213ebf425e1a81cc7b2def10e67ae5f 
 | 
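For context, a minimal sketch of how a command module like the one above is typically wired into a vorpal CLI; the shape of `app` and the require path are assumptions, since the release manager's entry point is not shown here.

```js
// Hypothetical entry point: register the command module with vorpal.
const Vorpal = require('vorpal');

const vorpal = Vorpal();
const app = {
  // Assumed shape: the release manager passes its npm/GitHub helpers on `app`.
};

require('./commands/npm-grant-access')(vorpal, app);

vorpal
  .delimiter('react-release$')
  .show();
```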
					
	fixtures/packaging/build-all.js 
 | 30 
							 | 
	var fs = require('fs');
var path = require('path');
var { spawnSync } = require('child_process');
var fixtureDirs = fs.readdirSync(__dirname).filter((file) => {
  return fs.statSync(path.join(__dirname, file)).isDirectory();
});
var cmdArgs = [
  {cmd: 'npm', args: ['install']},
  {cmd: 'npm', args: ['run', 'build']},
];
for (const dir of fixtureDirs) {
  for (const cmdArg of cmdArgs) {
    const opts = {
      cwd: path.join(__dirname, dir),
      stdio: 'inherit',
    };
    let result = spawnSync(cmdArg.cmd, cmdArg.args, opts);
    if (result.status !== 0) {
      throw new Error('Failed to build fixtures.');
    }
  }
}
console.log('-------------------------');
console.log('All fixtures were built!');
console.log('Now make sure to open each HTML file in this directory and each index.html in subdirectories.');
console.log('-------------------------');
 
 | 
	facebook_react 
 | 
	2017-01-28 
 | 
	751d22117213ebf425e1a81cc7b2def10e67ae5f 
 | 
					
	fixtures/dom/public/react-loader.js 
 | 43 
							 | 
	/**
 * Take a version from the window query string and load a specific
 * version of React.
 *
 * @example
 * http://localhost:3000?version=15.4.1
 * (Loads React 15.4.1)
 */
var REACT_PATH = 'react.js';
var DOM_PATH = 'react-dom.js';
function parseQuery(qstr) {
  var query = {};
  var a = qstr.substr(1).split('&');
  for (var i = 0; i < a.length; i++) {
    var b = a[i].split('=');
    query[decodeURIComponent(b[0])] = decodeURIComponent(b[1] || '');
  }
  return query;
}
var query = parseQuery(window.location.search);
var version = query.version || 'local';
if (version !== 'local') {
  REACT_PATH = 'https://unpkg.com/react@' + version + '/dist/react.min.js';
  DOM_PATH = 'https://unpkg.com/react-dom@' + version + '/dist/react-dom.min.js';
}
document.write('<script src="' + REACT_PATH + '"></script>');
// Versions earlier than 0.14 do not have a separate ReactDOM.
if (version === 'local' || parseFloat(version) > 0.13) {
  document.write('<script src="' + DOM_PATH + '"></script>');
} else {
  // Aliasing React to ReactDOM for compatibility.
  document.write('<script>ReactDOM = React</script>');
}
 
 | 
	facebook_react 
 | 
	2017-01-28 
 | 
	751d22117213ebf425e1a81cc7b2def10e67ae5f 
 | 
					
	scripts/release-manager/commands/version.js 
 | 162 
							 | 
	'use strict';
const fs = require('fs');
const path = require('path');
const semver = require('semver');
const chalk = require('chalk');
const git = require('./utils/git');
// Overview
// 1. Display current version
// 2. Prompt for new version
// 3. Update appropriate files
//    - package.json (version)
//    - npm-shrinkwrap.json (version)
//    - packages/react/package.json (version)
//    - packages/react-addons/package.json (version, peerDependencies.react)
//    - packages/react-dom/package.json (version, peerDependencies.react)
//    - packages/react-native-renderer/package.json (version, peerDependencies.react)
//    - packages/react-test-renderer/package.json (version, peerDependencies.react)
//    - src/ReactVersion.js (module.exports)
// 4. Commit?
function updateJSON(path, fields, value) {
  let data;
  try {
    data = JSON.parse(fs.readFileSync(path, 'utf8'));
  } catch (e) {
    this.log(chalk.red('ERROR') + ` ${path} doesn't exist… skipping.`);
    // Bail out so we don't write "undefined" back to a missing file below.
    return;
  }
  fields.forEach((field) => {
    let fieldPath = field.split('.');
    if (fieldPath.length === 1) {
      data[field] = value;
    } else {
      // assume length of 2 is some dep.react and we can just use ^ because we
      // know it's true. do something more versatile later
      data[fieldPath[0]][fieldPath[1]] = '^' + value;
    }
  });
  fs.writeFileSync(path, JSON.stringify(data, null, 2) + '\n');
}
module.exports = function(vorpal, app) {
  vorpal
    .command('version')
    .description('Update the version of React, useful while publishing')
    .action(function(args, actionCB) {
      let currentVersion = app.getReactVersion();
      // TODO: See if we can do a better job for handling pre* bumps. The ones
      // semver adds are of the form -0, but we've used -alpha.0 or -rc.0.
      // 'prerelease' will increment those properly (but otherwise has the same problem).
      // Live with it for now since it won't be super common. Write docs.
      let choices = ['prerelease', 'patch', 'minor', 'major'].map((release) => {
        let version = semver.inc(currentVersion, release);
        return {
          value: version,
          name:`${chalk.bold(version)} (${release})`,
        };
      });
      choices.push('Other');
      this.prompt([
        {
          type: 'list',
          name: 'version',
          choices: choices,
          message: `New version (currently ${chalk.bold(currentVersion)}):`,
        },
        {
          type: 'input',
          name: 'version',
          message: `New version (currently ${chalk.bold(currentVersion)}): `,
          when: (res) => res.version === 'Other',
        },
      ]).then((res) => {
        let newVersion = semver.valid(res.version);
        if (!newVersion) {
          return actionCB(`${chalk.red('ERROR')} ${res.version} is not a semver-valid version`);
        }
        this.log(`Updating to ${newVersion}`);
        // The JSON files. They're all updated the same way so batch.
        [
          {
            file: 'package.json',
            fields: ['version'],
          },
          {
            file: 'npm-shrinkwrap.json',
            fields: ['version'],
          },
          {
            file: 'packages/react/package.json',
            fields: ['version'],
          },
          {
            file: 'packages/react-addons/package.json',
            fields: ['version', 'peerDependencies.react'],
          },
          {
            file: 'packages/react-dom/package.json',
            fields: ['version', 'peerDependencies.react'],
          },
          {
            file: 'packages/react-native-renderer/package.json',
            fields: ['version', 'peerDependencies.react'],
          },
          {
            file: 'packages/react-test-renderer/package.json',
            fields: ['version', 'peerDependencies.react'],
          },
        ].forEach((opts) => {
          updateJSON.apply(this, [path.join(app.config.reactPath, opts.file), opts.fields, newVersion]);
        });
        // We also need to update src/ReactVersion.js which has the version in
        // string form in JS code. We'll just do a string replace.
        const PATH_TO_REACTVERSION = path.join(app.config.reactPath, 'src/ReactVersion.js');
        let reactVersionContents = fs.readFileSync(PATH_TO_REACTVERSION, 'utf8');
        reactVersionContents =
          reactVersionContents.replace(currentVersion, newVersion);
        fs.writeFileSync(PATH_TO_REACTVERSION, reactVersionContents);
        this.prompt([
          {
            name: 'commit',
            type: 'confirm',
            message: 'Commit these changes (`git commit -a`)?',
            default: true,
          },
          {
            name: 'tag',
            type: 'confirm',
            message: 'Tag the version commit (not necessary for non-stable releases)?',
            default: true,
            when: (res) => res.commit,
          },
        ]).then((res) => {
          if (res.commit) {
            git.commit(app, newVersion, true);
          }
          if (res.tag) {
            git.tag(app, `v${newVersion}`);
          }
          actionCB();
        });
      });
    });
};
 
 | 
	facebook_react 
 | 
	2017-01-28 
 | 
	751d22117213ebf425e1a81cc7b2def10e67ae5f 
 | 
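To make the version menu above concrete, here is what the four `semver.inc` choices produce for an illustrative starting version (values come from the `semver` package; '15.4.2' is only an example).

```js
const semver = require('semver');

semver.inc('15.4.2', 'prerelease'); // '15.4.3-0'  (the "-0" form noted in the TODO above)
semver.inc('15.4.2', 'patch');      // '15.4.3'
semver.inc('15.4.2', 'minor');      // '15.5.0'
semver.inc('15.4.2', 'major');      // '16.0.0'

// updateJSON() then writes the chosen version into "version" fields and
// '^' + version into two-part fields such as "peerDependencies.react".
```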
					
	fixtures/dom/src/components/fixtures/textareas/index.js 
 | 36 
							 | 
	const React = window.React;
const TextAreaFixtures = React.createClass({
  getInitialState() {
    return { value: '' };
  },
  onChange(event) {
    this.setState({ value: event.target.value });
  },
  render() {
    return (
      <div>
        <form className="container">
          <fieldset>
            <legend>Controlled</legend>
            <textarea value={this.state.value} onChange={this.onChange} />
          </fieldset>
          <fieldset>
            <legend>Uncontrolled</legend>
            <textarea defaultValue="" />
          </fieldset>
        </form>
        <div className="container">
          <h4>Controlled Output:</h4>
          <div className="output">
            {this.state.value}
          </div>
        </div>
      </div>
    );
  },
});
module.exports = TextAreaFixtures;
 
 | 
	facebook_react 
 | 
	2017-01-28 
 | 
	751d22117213ebf425e1a81cc7b2def10e67ae5f 
 | 
					
	scripts/release-manager/commands/stable-prs.js 
 | 263 
							 | 
	'use strict';
const chalk = require('chalk');
const pify = require('pify');
const git = require('./utils/git');
const SEMVER_LABELS = [
  'semver-major',
  'semver-minor',
  'semver-patch',
  'semver-exempt',
];
module.exports = function(vorpal, app) {
  vorpal
    .command('stable-prs')
    .description('Get list of stable pull requests that need to be merged to the stable branch')
    .action(function(args) {
      // This makes the chaining easier but obfuscates the actual API, which is
      // unfortunate. The standalone API will return the right data but
      // promisified will get the response object and then we need to pull data
      // off of that.
      let listMilestones = pify(app.ghissues.listMilestones.bind(app.ghissues));
      let listIssues = pify(app.ghissues.listIssues.bind(app.ghissues));
      let editIssue = pify(app.ghissues.editIssue.bind(app.ghissues));
      let getPullRequest = pify(app.ghrepo.getPullRequest.bind(app.ghrepo));
      let targetMilestone = null;
      return new Promise((resolveAction, rejectAction) => {
        listMilestones(null).then((milestones) => {
          app.writeTo('milestones.json', milestones);
          // Turn the milestones into choices for Inquirer
          let milestoneChoices = milestones.map((milestone) => {
            return {
              value: milestone.number,
              name: milestone.title,
            };
          });
          // We need label choices too
          let labelChoices = SEMVER_LABELS.map((label) => {
            return {
              value: label,
              name: label.split('-')[1], // "major" instead of "semver-major"
            };
          });
          // Ask about source milestone
          // Ask about dest milestone
          // TODO: allow creation of milestone here.
          // Ask about which labels to pull from
          return this.prompt([
            {
              name: 'srcMilestone',
              type: 'list',
              message: 'Which milestone should we pull PRs from?',
              choices: milestoneChoices,
            },
            {
              name: 'destMilestone',
              type: 'list',
              message: 'Which milestone should we assign PRs to upon completion?',
              choices: milestoneChoices,
            },
            {
              name: 'labels',
              type: 'checkbox',
              message: 'Which PRs should we select (use spacebar to check all that apply)',
              choices: labelChoices,
            },
          ]).then((answers) => {
            // this.log(JSON.stringify(answers, null, 2));
            targetMilestone = answers.destMilestone;
            let labels = {};
            answers.labels.forEach((label) => {
              labels[label] = true;
            });
            return {
              labels: labels,
              query: {
                milestone: answers.srcMilestone,
                per_page: 100,
                state: 'closed',
              },
            };
          });
        })
        // Request issues, filter to applicable PRs
        .then(({labels, query}) => {
          return listIssues(query).then((issues) => {
            app.writeTo('stable-issues.json', issues);
            // This API *could* return issues that aren't pull requests, so filter out
            // issues that don't have pull_request set. Also filter out issues that
            // aren't the right level of semver (eg if running a patch release)
            let filteringLabels = Object.keys(labels).length > 0;
            const pulls = issues.filter((issue) => {
              if (!issue.pull_request) {
                return false;
              }
              if (!filteringLabels) {
                return true;
              }
              return issue.labels.some((label) => labels[label.name]);
            });
            app.writeTo('stable-prs.json', pulls);
            return pulls;
          })
          // We need to convert the issues to PRs. We don't actually have enough
          // info for the pull request data, so we need to get more. Then we'll
          // do some filtering and sorting to make sure we apply merged PRs in
          // the order they were originally committed to avoid conflicts as much
          // as possible.
          .then((pulls) => {
            return Promise.all(pulls.map((pr) => {
              return getPullRequest(pr.number)
                .then((richPR) => {
                  app.writeTo(`pr-${pr.number}.json`, richPR);
                  richPR.__originalIssue = pr;
                  return richPR;
                });
            }))
            .then((richPRs) => {
              return richPRs.filter((pr) => {
                if (!pr.merged_at) {
                  this.log(`${chalk.yellow.bold('WARNING')} ${pr.html_url} was not merged, should have the milestone unset.`);
                  return false;
                }
                return true;
              }).map((pr) => {
                pr.merged_at_date = new Date(pr.merged_at);
                return pr;
              }).sort((a, b) => a.merged_at_date - b.merged_at_date);
            });
          });
        })
        // Quick prompt to double check that we should proceed.
        .then((pulls) => {
          this.log(`Found ${chalk.bold(pulls.length)} pull requests:`);
          pulls.forEach((pr) => {
            this.log(`${pr.html_url}: ${chalk.bold(pr.title)}`);
          });
          return this.prompt({
            name: 'merge',
            type: 'confirm',
            message: `Merge these ${pulls.length} pull requests?`,
          }).then((answers) => {
            return answers.merge ? pulls : rejectAction('cancelled');
          });
        })
        // Ok, now we finally have rich pull request data. We can start cherry picking…
        .then((pulls) => {
          // We're going to do some error handling here so we don't get into a
          // terrible state.
          this.log(`Found ${chalk.bold(pulls.length)} pull requests:`);
          return new Promise((resolve, reject) => {
            cherryPickPRs.call(this, app, pulls)
              .then((results) => {
                resolve(results);
              })
              .catch((err) => {
                this.log(`${chalk.red.bold('ERROR')} Something went wrong and your repo is probably in a bad state. Sorry.`);
                resolve({
                  successful: [],
                  skipped: [],
                  didAbort: true,
                });
              });
          });
        })
        // Update the milestone on successful PRs
        // TODO: maybe handle didAbort and git reset --hard to a rev we read when we start the process?
        .then(({successful, skipped, didAbort}) => {
          if (didAbort) {
            return undefined;
          }
          return Promise.all(successful.map((pr) => {
            return editIssue(pr.number, {milestone: targetMilestone});
          }));
        })
        // yay, we're done
        .then(() => {
          resolveAction();
        })
        .catch((err) => {
          this.log('ERROR', err);
          rejectAction();
        });
      });
    });
};
function cherryPickPRs(app, prs) {
  let successful = [];
  let skipped = [];
  return new Promise((resolve, reject) => {
    // Build array of thenables
    let promises = prs.map((pr) => {
      return () => new Promise((res, rej) => {
        this.log(chalk.yellow(`Cherry-picking #${pr.number} (${pr.title})...`));
        let failed = false;
        try {
          git.cherryPickMerge(app, pr.merge_commit_sha);
        } catch (e) {
          failed = true;
        }
        if (!failed) {
          this.log(chalk.green`Success`);
          successful.push(pr);
          return res();
        }
        return this.prompt({
          name: 'handle',
          type: 'list',
          message: `${chalk.red`Failed!`} ${chalk.yellow('This must be resolved manually!')}`,
          choices: [
            {value: 'ok', name: 'Continue, mark successful'},
            {value: 'skip', name: 'Continue, mark skipped'},
            {value: 'abort', name: 'Abort process. Will require manual resetting of git state.'},
          ],
        }).then((answers) => {
          switch (answers.handle) {
            case 'ok':
              successful.push(pr);
              break;
            case 'skip':
              skipped.push(pr);
              break;
            case 'abort':
              return rej(pr.number);
          }
          res(pr.number);
        });
      });
    });
    // Since promises run on creation and we don't actually want that, we create
    // an array of functions that return promises. We'll chain them here, not
    // actually creating the next promise until we're ready.
    var p = promises[0]();
    for (let i = 1; i < promises.length; i++) {
      p = p.then(() => promises[i]());
    }
    p.then(() => {
      resolve({successful, skipped, didAbort: false});
    }).catch((e) => {
      resolve({successful, skipped, didAbort: true});
    });
  });
}
 
 | 
	facebook_react 
 | 
	2017-01-28 
 | 
	751d22117213ebf425e1a81cc7b2def10e67ae5f 
 | 
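The comment inside `cherryPickPRs` describes a common trick: because promises start running as soon as they are created, the code wraps each cherry-pick in a factory and chains the factories so they run strictly one after another. A standalone sketch of that pattern (the task bodies are placeholders):

```js
// Each entry is a factory that returns a promise only when invoked.
const tasks = [1, 2, 3].map((n) => {
  return () => new Promise((resolve) => {
    console.log(`running task ${n}`);
    setTimeout(resolve, 10); // stand-in for git.cherryPickMerge / prompts
  });
});

// Chain the factories so task n+1 starts only after task n settles.
let chain = tasks[0]();
for (let i = 1; i < tasks.length; i++) {
  chain = chain.then(() => tasks[i]());
}
chain.then(() => console.log('all tasks finished in order'));
```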
					
	src/renderers/shared/shared/__tests__/ReactStatelessComponent-test.js 
 | 162 
							 | 
	/**
 * Copyright 2013-present, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree. An additional grant
 * of patent rights can be found in the PATENTS file in the same directory.
 *
 * @emails react-core
 */
'use strict';
var React;
var ReactDOM;
var ReactTestUtils;
var ReactDOMFeatureFlags = require('ReactDOMFeatureFlags');
function StatelessComponent(props) {
  return <div>{props.name}</div>;
}
describe('ReactStatelessComponent', () => {
  function normalizeCodeLocInfo(str) {
    return str && str.replace(/\(at .+?:\d+\)/g, '(at **)');
  }
  beforeEach(() => {
    jest.resetModuleRegistry();
    React = require('React');
    ReactDOM = require('ReactDOM');
    ReactTestUtils = require('ReactTestUtils');
  });
  it('should render stateless component', () => {
    var el = document.createElement('div');
    ReactDOM.render(<StatelessComponent name="A" />, el);
    expect(el.textContent).toBe('A');
  });
  it('should update stateless component', () => {
    class Parent extends React.Component {
      render() {
        return <StatelessComponent {...this.props} />;
      }
    }
    var el = document.createElement('div');
    ReactDOM.render(<Parent name="A" />, el);
    expect(el.textContent).toBe('A');
    ReactDOM.render(<Parent name="B" />, el);
    expect(el.textContent).toBe('B');
  });
  it('should unmount stateless component', () => {
    var container = document.createElement('div');
    ReactDOM.render(<StatelessComponent name="A" />, container);
    expect(container.textContent).toBe('A');
    ReactDOM.unmountComponentAtNode(container);
    expect(container.textContent).toBe('');
  });
  it('should pass context thru stateless component', () => {
    class Child extends React.Component {
      static contextTypes = {
        test: React.PropTypes.string.isRequired,
      };
      render() {
        return <div>{this.context.test}</div>;
      }
    }
    function Parent() {
      return <Child />;
    }
    class GrandParent extends React.Component {
      static childContextTypes = {
        test: React.PropTypes.string.isRequired,
      };
      getChildContext() {
        return {test: this.props.test};
      }
      render() {
        return <Parent />;
      }
    }
    var el = document.createElement('div');
    ReactDOM.render(<GrandParent test="test" />, el);
    expect(el.textContent).toBe('test');
    ReactDOM.render(<GrandParent test="mest" />, el);
    expect(el.textContent).toBe('mest');
  });
  it('should warn for childContextTypes on a functional component', () => {
    spyOn(console, 'error');
    function StatelessComponentWithChildContext(props) {
      return <div>{props.name}</div>;
    }
    StatelessComponentWithChildContext.childContextTypes = {
      foo: React.PropTypes.string,
    };
    var container = document.createElement('div');
    ReactDOM.render(<StatelessComponentWithChildContext name="A" />, container);
    expectDev(console.error.calls.count()).toBe(2);
    expectDev(console.error.calls.argsFor(0)[0]).toContain(
      'StatelessComponentWithChildContext(...): childContextTypes cannot ' +
      'be defined on a functional component.'
    );
    expectDev(normalizeCodeLocInfo(console.error.calls.argsFor(1)[0])).toBe(
      'Warning: StatelessComponentWithChildContext.childContextTypes is specified ' +
      'but there is no getChildContext() method on the instance. You can either ' +
      'define getChildContext() on StatelessComponentWithChildContext or remove ' +
      'childContextTypes from it.'
    );
  });
  if (!ReactDOMFeatureFlags.useFiber) {
    // Stack doesn't support fragments
    it('should throw when stateless component returns array', () => {
      function NotAComponent() {
        return [<div />, <div />];
      }
      expect(function() {
        ReactTestUtils.renderIntoDocument(<div><NotAComponent /></div>);
      }).toThrowError(
        'NotAComponent(...): A valid React element (or null) must be returned. ' +
        'You may have returned undefined, an array or some other invalid object.'
      );
    });
  }
  it('should throw when stateless component returns undefined', () => {
    function NotAComponent() {
    }
    expect(function() {
      ReactTestUtils.renderIntoDocument(<div><NotAComponent /></div>);
    }).toThrowError(
      'NotAComponent(...): A valid React element (or null) must be returned. ' +
      'You may have returned undefined, an array or some other invalid object.'
    );
  });
  it('should throw on string refs in pure functions', () => {
    function Child() {
      return <div ref="me" />;
    }
    expect(function() {
      ReactTestUtils.renderIntoDocument(<Child test="test" />);
    }).toThrowError(
      'Stateless function components cannot have refs.'
    );
  });
  it('should warn when given a string ref', () => {
    spyOn(console, 'error');
    function Indirection(props) {
      return <div>{props.children}</div>;
    }
    class ParentUsingStringRef extends React.Component {
      render() {
        return (
          <Indirection>
            <StatelessComponent name="A" ref="stateless" />
          </Indirection>
        );
      }
    }
    ReactTestUtils.renderIntoDocument(<ParentUsingStringRef />);
    expectDev(console.error.calls.count()).toBe(1);
    expectDev(normalizeCodeLocInfo(console.error.calls.argsFor(0)[0])).toBe(
      'Warning: Stateless function components cannot be given refs. ' +
      'Attempts to access this ref will fail. Check the render method ' +
      'of `ParentUsingStringRef`.\n' +
      '    in StatelessComponent (at **)\n' +
      '    in div (at **)\n' +
      '    in Indirection (at **)\n' +
      '    in ParentUsingStringRef (at **)'
    );
    ReactTestUtils.renderIntoDocument(<ParentUsingStringRef />);
    expectDev(console.error.calls.count()).toBe(1);
  });
  it('should warn when given a function ref', () => {
    spyOn(console, 'error');
    function Indirection(props) {
      return <div>{props.children}</div>;
    }
    class ParentUsingFunctionRef extends React.Component {
      render() {
        return (
          <Indirection>
            <StatelessComponent name="A" ref={(arg) => {
              expect(arg).toBe(null);
            }} />
          </Indirection>
        );
      }
    }
    ReactTestUtils.renderIntoDocument(<ParentUsingFunctionRef />);
    expectDev(console.error.calls.count()).toBe(1);
    expectDev(normalizeCodeLocInfo(console.error.calls.argsFor(0)[0])).toBe(
      'Warning: Stateless function components cannot be given refs. ' +
      'Attempts to access this ref will fail. Check the render method ' +
      'of `ParentUsingFunctionRef`.\n' +
      '    in StatelessComponent (at **)\n' +
      '    in div (at **)\n' +
      '    in Indirection (at **)\n' +
      '    in ParentUsingFunctionRef (at **)'
    );
    ReactTestUtils.renderIntoDocument(<ParentUsingFunctionRef />);
    expectDev(console.error.calls.count()).toBe(1);
  });
  it('deduplicates ref warnings based on element or owner', () => {
    spyOn(console, 'error');
    // Prevent the Babel transform adding a displayName.
    var createClassWithoutDisplayName = React.createClass;
    // When owner uses JSX, we can use exact line location to dedupe warnings
    var AnonymousParentUsingJSX = createClassWithoutDisplayName({
      render() {
        return <StatelessComponent name="A" ref={() => {}} />;
      },
    });
    const instance1 = ReactTestUtils.renderIntoDocument(<AnonymousParentUsingJSX />);
    expectDev(console.error.calls.count()).toBe(1);
    expectDev(console.error.calls.argsFor(0)[0]).toContain(
      'Warning: Stateless function components cannot be given refs.'
    );
    // Should be deduped (offending element is on the same line):
    instance1.forceUpdate();
    // Should also be deduped (offending element is on the same line):
    ReactTestUtils.renderIntoDocument(<AnonymousParentUsingJSX />);
    expectDev(console.error.calls.count()).toBe(1);
    console.error.calls.reset();
    // When owner doesn't use JSX, and is anonymous, we warn once per internal instance.
    var AnonymousParentNotUsingJSX = createClassWithoutDisplayName({
      render() {
        return React.createElement(StatelessComponent, {name: 'A', 'ref': () => {}});
      },
    });
    const instance2 = ReactTestUtils.renderIntoDocument(<AnonymousParentNotUsingJSX />);
    expectDev(console.error.calls.count()).toBe(1);
    expectDev(console.error.calls.argsFor(0)[0]).toContain(
      'Warning: Stateless function components cannot be given refs.'
    );
    // Should be deduped (same internal instance):
    instance2.forceUpdate();
    expectDev(console.error.calls.count()).toBe(1);
    // Could not be deduped (different internal instance):
    ReactTestUtils.renderIntoDocument(<AnonymousParentNotUsingJSX />);
    expectDev(console.error.calls.count()).toBe(2);
    expectDev(console.error.calls.argsFor(1)[0]).toContain(
      'Warning: Stateless function components cannot be given refs.'
    );
    console.error.calls.reset();
    // When owner doesn't use JSX, but is named, we warn once per owner name
    class NamedParentNotUsingJSX extends React.Component {
      render() {
        return React.createElement(StatelessComponent, {name: 'A', 'ref': () => {}});
      }
    }
    const instance3 = ReactTestUtils.renderIntoDocument(<NamedParentNotUsingJSX />);
    expectDev(console.error.calls.count()).toBe(1);
    expectDev(console.error.calls.argsFor(0)[0]).toContain(
      'Warning: Stateless function components cannot be given refs.'
    );
    // Should be deduped (same owner name):
    instance3.forceUpdate();
    expectDev(console.error.calls.count()).toBe(1);
    // Should also be deduped (same owner name):
    ReactTestUtils.renderIntoDocument(<NamedParentNotUsingJSX />);
    expectDev(console.error.calls.count()).toBe(1);
    console.error.calls.reset();
  });
  it('should provide a null ref', () => {
    function Child() {
      return <div />;
    }
    var comp = ReactTestUtils.renderIntoDocument(<Child />);
    expect(comp).toBe(null);
  });
  it('should use correct name in key warning', () => {
    function Child() {
      return <div>{[<span />]}</div>;
    }
    spyOn(console, 'error');
    ReactTestUtils.renderIntoDocument(<Child />);
    expectDev(console.error.calls.count()).toBe(1);
    expectDev(console.error.calls.argsFor(0)[0]).toContain('a unique "key" prop');
    expectDev(console.error.calls.argsFor(0)[0]).toContain('Child');
  });
  it('should support default props and prop types', () => {
    function Child(props) {
      return <div>{props.test}</div>;
    }
    Child.defaultProps = {test: 2};
    Child.propTypes = {test: React.PropTypes.string};
    spyOn(console, 'error');
    ReactTestUtils.renderIntoDocument(<Child />);
    expectDev(console.error.calls.count()).toBe(1);
    expect(
      console.error.calls.argsFor(0)[0].replace(/\(at .+?:\d+\)/g, '(at **)')
    ).toBe(
      'Warning: Failed prop type: Invalid prop `test` of type `number` ' +
      'supplied to `Child`, expected `string`.\n' +
      '    in Child (at **)'
    );
  });
  it('should receive context', () => {
    class Parent extends React.Component {
      static childContextTypes = {
        lang: React.PropTypes.string,
      };
      getChildContext() {
        return {lang: 'en'};
      }
      render() {
        return <Child />;
      }
    }
    function Child(props, context) {
      return <div>{context.lang}</div>;
    }
    Child.contextTypes = {lang: React.PropTypes.string};
    var el = document.createElement('div');
    ReactDOM.render(<Parent />, el);
    expect(el.textContent).toBe('en');
  });
  it('should work with arrow functions', () => {
    var Child = function() {
      return <div />;
    };
    // Will create a new bound function without a prototype, much like a native
    // arrow function.
    Child = Child.bind(this);
    expect(() => ReactTestUtils.renderIntoDocument(<Child />)).not.toThrow();
  });
  it('should allow simple functions to return null', () => {
    var Child = function() {
      return null;
    };
    expect(() => ReactTestUtils.renderIntoDocument(<Child />)).not.toThrow();
  });
  it('should allow simple functions to return false', () => {
    function Child() {
      return false;
    }
    expect(() => ReactTestUtils.renderIntoDocument(<Child />)).not.toThrow();
  });
});
 
 | 
	facebook_react 
 | 
	2017-01-28 
 | 
	751d22117213ebf425e1a81cc7b2def10e67ae5f 
 | 
					
	fixtures/dom/src/components/fixtures/index.js 
 | 28 
							 | 
	const React = window.React;
import RangeInputFixtures from './range-inputs';
import TextInputFixtures from './text-inputs';
import SelectFixtures from './selects';
import TextAreaFixtures from './textareas/';
/**
 * A simple routing component that renders the appropriate
 * fixture based on the location pathname.
 */
const FixturesPage = React.createClass({
  render() {
    switch (window.location.pathname) {
      case '/text-inputs':
        return <TextInputFixtures />;
      case '/range-inputs':
        return <RangeInputFixtures />;
      case '/selects':
        return <SelectFixtures />;
      case '/textareas':
        return <TextAreaFixtures />;
      default:
        return <p>Please select a test fixture.</p>;
    }
  },
});
module.exports = FixturesPage;
 
 | 
	facebook_react 
 | 
	2017-01-28 
 | 
	751d22117213ebf425e1a81cc7b2def10e67ae5f 
 | 
					
	scripts/release-manager/commands/start-release.js 
 | 44 
							 | 
	// fetch upstream
// checkout 15-dev, update
// merge upstream/15-stable in
// done
'use strict';
const chalk = require('chalk');
var git = require('./utils/git');
module.exports = function(vorpal, app) {
  vorpal
    .command('start-release')
    .description('Start the process for shipping the next release')
    .action(function(args) {
      return new Promise((resolve, reject) => {
        // TODO: ensure that repo has upstream remote, correct branches setup.
        if (!git.isClean(app)) {
          this.log('ERROR: repo not in clean state');
          return reject();
        }
        // Fetch upstream - this ensures upstream/15-stable is updated and we
        // won't rely on the local branch.
        git.fetch(app, 'upstream');
        // Checkout 15-dev
        git.checkout(app, '15-dev');
        // Update to ensure latest commits are in. Will hit network again but
        // shouldn't need to get anything.
        git.pull(app);
        // Merge 15-stable in
        git.merge(app, 'upstream/15-stable', false);
        this.log(chalk.green.bold(`OK!`));
        this.log(`You can now start cherry-picking commits to this branch using the "stable-prs" command.`);
        resolve();
      });
    });
};
 
 | 
	facebook_react 
 | 
	2017-01-28 
 | 
	751d22117213ebf425e1a81cc7b2def10e67ae5f 
 | 
					
	src/renderers/dom/fiber/ReactDOMFrameScheduling.js 
 | 149 
							 | 
	/**
 * Copyright 2013-present, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree. An additional grant
 * of patent rights can be found in the PATENTS file in the same directory.
 *
 * @providesModule ReactDOMFrameScheduling
 * @flow
 */
'use strict';
// This is a built-in polyfill for requestIdleCallback. It works by scheduling
// a requestAnimationFrame, storing the time for the start of the frame, then
// scheduling a postMessage which is handled after paint. Within the
// postMessage handler we do as much work as possible until time + frame rate.
// By separating the idle call into a separate event tick we ensure that
// layout, paint and other browser work is counted against the available time.
// The frame rate is dynamically adjusted.
import type { Deadline } from 'ReactFiberReconciler';
var invariant = require('invariant');
// TODO: There's no way to cancel these, because Fiber doesn't atm.
let rAF : (callback : (time : number) => void) => number;
let rIC : (callback : (deadline : Deadline) => void) => number;
if (typeof requestAnimationFrame !== 'function') {
  invariant(
    false,
    'React depends on requestAnimationFrame. Make sure that you load a ' +
    'polyfill in older browsers.'
  );
} else if (typeof requestIdleCallback !== 'function') {
  // Wrap requestAnimationFrame and polyfill requestIdleCallback.
  var scheduledRAFCallback = null;
  var scheduledRICCallback = null;
  var isIdleScheduled = false;
  var isAnimationFrameScheduled = false;
  var frameDeadline = 0;
  // We start out assuming that we run at 30fps but then the heuristic tracking
  // will adjust this value to a faster fps if we get more frequent animation
  // frames.
  var previousFrameTime = 33;
  var activeFrameTime = 33;
  var frameDeadlineObject = {
    timeRemaining: (
      typeof performance === 'object' &&
      typeof performance.now === 'function' ? function() {
        // We assume that if we have a performance timer that the rAF callback
        // gets a performance timer value. Not sure if this is always true.
        return frameDeadline - performance.now();
      } : function() {
        // As a fallback we use Date.now.
        return frameDeadline - Date.now();
      }
    ),
  };
  // We use the postMessage trick to defer idle work until after the repaint.
  var messageKey =
    '__reactIdleCallback$' + Math.random().toString(36).slice(2);
  var idleTick = function(event) {
    if (event.source !== window || event.data !== messageKey) {
      return;
    }
    isIdleScheduled = false;
    var callback = scheduledRICCallback;
    scheduledRICCallback = null;
    if (callback) {
      callback(frameDeadlineObject);
    }
  };
  // Assumes that we have addEventListener in this environment. Might need
  // something better for old IE.
  window.addEventListener('message', idleTick, false);
  var animationTick = function(rafTime) {
    isAnimationFrameScheduled = false;
    var nextFrameTime = rafTime - frameDeadline + activeFrameTime;
    if (nextFrameTime < activeFrameTime && previousFrameTime < activeFrameTime) {
      if (nextFrameTime < 8) {
        // Defensive coding. We don't support higher frame rates than 120hz.
        // If we get lower than that, it is probably a bug.
        nextFrameTime = 8;
      }
      // If one frame goes long, then the next one can be short to catch up.
      // If two frames are short in a row, then that's an indication that we
      // actually have a higher frame rate than what we're currently optimizing.
      // We adjust our heuristic dynamically accordingly. For example, if we're
      // running on 120hz display or 90hz VR display.
      // Take the max of the two in case one of them was an anomaly due to
      // missed frame deadlines.
      activeFrameTime = nextFrameTime < previousFrameTime ?
                        previousFrameTime : nextFrameTime;
    } else {
      previousFrameTime = nextFrameTime;
    }
    frameDeadline = rafTime + activeFrameTime;
    if (!isIdleScheduled) {
      isIdleScheduled = true;
      window.postMessage(messageKey, '*');
    }
    var callback = scheduledRAFCallback;
    scheduledRAFCallback = null;
    if (callback) {
      callback(rafTime);
    }
  };
  rAF = function(callback : (time : number) => void) : number {
    // This assumes that we only schedule one callback at a time because that's
    // how Fiber uses it.
    scheduledRAFCallback = callback;
    if (!isAnimationFrameScheduled) {
      // If rIC didn't already schedule one, we need to schedule a frame.
      isAnimationFrameScheduled = true;
      requestAnimationFrame(animationTick);
    }
    return 0;
  };
  rIC = function(callback : (deadline : Deadline) => void) : number {
    // This assumes that we only schedule one callback at a time because that's
    // how Fiber uses it.
    scheduledRICCallback = callback;
    if (!isAnimationFrameScheduled) {
      // If rAF didn't already schedule one, we need to schedule a frame.
      // TODO: If this rAF doesn't materialize because the browser throttles, we
      // might want to still have setTimeout trigger rIC as a backup to ensure
      // that we keep performing work.
      isAnimationFrameScheduled = true;
      requestAnimationFrame(animationTick);
    }
    return 0;
  };
} else {
  rAF = requestAnimationFrame;
  rIC = requestIdleCallback;
}
exports.rAF = rAF;
exports.rIC = rIC;
 
 | 
	facebook_react 
 | 
	2017-01-28 
 | 
	751d22117213ebf425e1a81cc7b2def10e67ae5f 
 | 
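A sketch of how a consumer might use the two exports above; the Haste-style require mirrors the imports used elsewhere in this codebase, and the work loop is illustrative.

```js
// rAF/rIC are either the native APIs or the postMessage-based polyfill above.
var ReactDOMFrameScheduling = require('ReactDOMFrameScheduling');

ReactDOMFrameScheduling.rAF(function(time) {
  // High-priority work tied to the next animation frame.
});

ReactDOMFrameScheduling.rIC(function(deadline) {
  // Low-priority work: keep going while the frame still has budget.
  while (deadline.timeRemaining() > 1) {
    // ...perform one small unit of deferred work...
  }
});
```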
					
	src/isomorphic/__tests__/React-test.js 
 | 42 
							 | 
	/**
 * Copyright 2013-present, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree. An additional grant
 * of patent rights can be found in the PATENTS file in the same directory.
 *
 * @emails react-core
 */
'use strict';
describe('React', () => {
  var React;
  beforeEach(() => {
    React = require('React');
  });
  it('should log a deprecation warning once when using React.__spread', () => {
    spyOn(console, 'error');
    React.__spread({});
    React.__spread({});
    expectDev(console.error.calls.count()).toBe(1);
    expectDev(console.error.calls.argsFor(0)[0]).toContain(
      'React.__spread is deprecated and should not be used'
    );
  });
  it('should log a deprecation warning once when using React.createMixin', () => {
    spyOn(console, 'error');
    React.createMixin();
    React.createMixin();
    expectDev(console.error.calls.count()).toBe(1);
    expectDev(console.error.calls.argsFor(0)[0]).toContain(
      'React.createMixin is deprecated and should not be used'
    );
  });
});
 
 | 
	facebook_react 
 | 
	2017-01-28 
 | 
	751d22117213ebf425e1a81cc7b2def10e67ae5f 
 | 
					
	scripts/release-manager/commands/q.js 
 | 14 
							 | 
	/**
 * Stupid command to run exit. 'q' is way shorter, like less.
 */
'use strict';
module.exports = function(vorpal, config) {
  vorpal
    .command('q')
    .hidden()
    .action((args, cb) => {
      vorpal.exec('exit').then(cb);
    });
};
 
 | 
	facebook_react 
 | 
	2017-01-28 
 | 
	751d22117213ebf425e1a81cc7b2def10e67ae5f 
 | 
					
	fixtures/dom/src/components/fixtures/selects/index.js 
 | 37 
							 | 
	const React = window.React;
const SelectFixture = React.createClass({
  getInitialState() {
    return { value: '' };
  },
  onChange(event) {
    this.setState({ value: event.target.value });
  },
  render() {
    return (
      <form>
        <fieldset>
          <legend>Controlled</legend>
          <select value={this.state.value} onChange={this.onChange}>
            <option value="">Select a color</option>
            <option value="red">Red</option>
            <option value="blue">Blue</option>
            <option value="green">Green</option>
          </select>
          <span className="hint">Value: {this.state.value}</span>
        </fieldset>
        <fieldset>
          <legend>Uncontrolled</legend>
          <select defaultValue="">
            <option value="">Select a color</option>
            <option value="red">Red</option>
            <option value="blue">Blue</option>
            <option value="gree">Green</option>
          </select>
        </fieldset>
      </form>
    );
  },
});
export default SelectFixture;
 
 | 
	facebook_react 
 | 
	2017-01-28 
 | 
	751d22117213ebf425e1a81cc7b2def10e67ae5f 
 | 
					
	scripts/release-manager/commands/docs-prs.js 
 | 139 
							 | 
	'use strict';
const chalk = require('chalk');
const git = require('./utils/git');
const DOCS_LABEL = 'Documentation: needs merge to stable';
// FOR DOCS
// get all issues with label
// ensure all have pull_request
//   FAIL: log issues that aren't prs
// fetch each pr, (issues.pull_request.url)
// sort each by merged_at
// git cherry-pick -x sha || git cherry-pick -x -m1 sha
//   (or use API to look up number of parents, 2 = use -m1)
// track progress. on fail, pause and force user to handle manually, continue? prompt
// git push
// update labels on each PR
//   ALT: dump link to https://github.com/facebook/react/issues?q=label%3A%22Documentation%3A+needs+merge+to+stable%22+is%3Aclosed
//        and say manual step to remove label
module.exports = function(vorpal, app) {
  vorpal
    .command('docs-prs')
    .description('Get list of documentation pull requests that need to be merged to the stable branch')
    .action(function(args, actionCB) {
      const branch = git.getBranch(app);
      if (!branch.match(/-stable$/)) {
        this.log(chalk.red('Aborting...'));
        this.log(
          `You need to be on the latest stable branch in the React repo ` +
          `to execute this command.\nYou are currently in ${branch}.`
        );
        actionCB();
        return;
      }
      const query = {
        labels: [DOCS_LABEL].join(), // github-api doesn't join automatically
        state: 'closed',
      };
      app.ghissues.listIssues(query, (err, body) => {
        app.writeTo('issues.json', body);
        // console.log(body);
        // fs.writeFileSync('body.json', JSON.stringify(body, null, 2));
        // fs.writeFileSync('headers.json', JSON.stringify(headers, null, 2));
        // const prs = require('./body');
        // This API *could* return issues that aren't pull requests, so filter out
        // issues that don't have pull_request set.
        const pulls = body.filter((issue) => issue.pull_request);
        // We don't have enough data about the pull request (merge sha or merge time) so we
        // need to fetch more. We'll use promises so we don't have to count completions.
        const pullPromises = pulls.map((pr) => {
          return new Promise((resolve, reject) => {
            app.ghrepo.getPullRequest(pr.number, (err, body) => {
              if (err) {
                // Bail out so we don't keep using an undefined body below.
                return reject(err);
              }
              app.writeTo(`pr-${pr.number}.json`, body);
              // We want to track the original issue as well since it has the
              // label information.
              const richPull = body;
              richPull.__originalIssue = pr;
              resolve(richPull);
            });
          });
        });
        Promise.all(pullPromises).then((richPulls) => {
          richPulls.forEach((pr) => {
            // Convert merged_at to real Date for sorting
            pr.merged_at_date = new Date(pr.merged_at);
          });
          richPulls = richPulls.sort((a, b) => a.merged_at_date - b.merged_at_date);
          this.log(`Found ${chalk.bold(richPulls.length)} pull requests:`);
          richPulls.forEach((pr) => {
            this.log(`${pr.html_url}: ${chalk.bold(pr.title)}`);
          });
          this.prompt({
            name: 'merge',
            type: 'confirm',
            message: `Merge these ${richPulls.length} pull requests?`,
          }, (res) => {
            if (res.merge) {
              richPulls.forEach((pr) => {
                git.cherryPickMerge(app, pr.merge_commit_sha);
              });
              this.prompt({
                name: 'push',
                type: 'confirm',
                message: 'Push these commits upstream?',
              }, (res) => {
                if (res.push) {
                  git.push(app);
                  this.log(`Pushed upstream! Removing "${DOCS_LABEL}" label from pull requests.`);
                }
                // TODO: actually test this
                var removeLabelsPromises = richPulls.map((pr) => {
                  return new Promise((resolve, reject) => {
                    const updatedLabels = pr.__originalIssue.labels
                      .filter((label) => label.name !== DOCS_LABEL)
                      .map(label => label.name);
                    app.ghissues.editIssue(pr.number, {labels: updatedLabels}, (err, body) => {
                      if (err) {
                        reject(err);
                      } else {
                        resolve(pr);
                      }
                    });
                  });
                });
                Promise.all(removeLabelsPromises).then(() => {
                  this.log('Done!');
                  actionCB();
                });
              });
            } else {
              actionCB();
            }
          });
        });
      });
    });
};
 
 | 
	facebook_react 
 | 
	2017-01-28 
 | 
	751d22117213ebf425e1a81cc7b2def10e67ae5f 
 | 
					
	fixtures/packaging/webpack-alias/config.js 
 | 15 
							 | 
	var path = require('path');
module.exports = {
  entry: './input',
  output: {
    filename: 'output.js',
  },
  resolve: {
    root: path.resolve('../../../build/packages'),
    alias: {
      'react': 'react/dist/react-with-addons',
      'react-dom': 'react-dom/dist/react-dom',
    },
  },
};
 
 | 
	facebook_react 
 | 
	2017-01-28 
 | 
	751d22117213ebf425e1a81cc7b2def10e67ae5f 
 | 
					
	scripts/release-manager/commands/npm-check-access.js 
 | 40 
							 | 
	'use strict';
const npmUtils = require('./utils/npm');
const chalk = require('chalk');
const opn = require('opn');
module.exports = function(vorpal, app) {
  vorpal
    .command('npm-check-access')
    .description('Check to ensure you have correct access to npm packages')
    .action(function(args) {
      return new Promise((resolve, reject) => {
        const username = npmUtils.whoami(app);
        if (!username) {
          return reject(
            `${chalk.red('FAILED')} You aren't logged in to npm. Please run ` +
            `${chalk.underline(`npm adduser`)} and try again.`
          );
        }
        this.log(`${chalk.green('OK')} Logged in as ${chalk.bold(username)}`);
        const packagesNeedingAccess = npmUtils.packagesNeedingAccess(app, username);
        if (packagesNeedingAccess.length) {
          this.log(
            `${chalk.red('FAILED')} You don't have access to all of the packages ` +
            `you need. We just opened a URL to file a new issue requesting access.`
          );
          opn(
            npmUtils.generateAccessNeededIssue(username, packagesNeedingAccess),
            {wait: false}
          ).then(resolve);
        } else {
          this.log(`${chalk.green('OK')} You can publish all React packages`);
          resolve();
        }
      });
    });
};
 
 | 
	facebook_react 
 | 
	2017-01-28 
 | 
	751d22117213ebf425e1a81cc7b2def10e67ae5f 
 | 
					
	src/renderers/testing/stack/ReactTestRendererStack.js 
 | 160 
							 | 
	/**
 * Copyright 2013-present, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree. An additional grant
 * of patent rights can be found in the PATENTS file in the same directory.
 *
 * @providesModule ReactTestRendererStack
 * @preventMunge
 * @flow
 */
'use strict';
var ReactComponentEnvironment = require('ReactComponentEnvironment');
var ReactDefaultBatchingStrategy = require('ReactDefaultBatchingStrategy');
var ReactEmptyComponent = require('ReactEmptyComponent');
var ReactMultiChild = require('ReactMultiChild');
var ReactHostComponent = require('ReactHostComponent');
var ReactTestMount = require('ReactTestMount');
var ReactTestReconcileTransaction = require('ReactTestReconcileTransaction');
var ReactUpdates = require('ReactUpdates');
var ReactTestTextComponent = require('ReactTestTextComponent');
var ReactTestEmptyComponent = require('ReactTestEmptyComponent');
var invariant = require('invariant');
import type { ReactElement } from 'ReactElementType';
import type { ReactInstance } from 'ReactInstanceType';
import type { ReactText } from 'ReactTypes';
type ReactTestRendererJSON = {
  type: string,
  props: { [propName: string]: any },
  children: null | Array<ReactText | ReactTestRendererJSON>,
  $$typeof?: any
}
/**
 * Drill down (through composites and empty components) until we get a native or
 * native text component.
 *
 * This is pretty polymorphic but unavoidable with the current structure we have
 * for `_renderedChildren`.
 */
function getRenderedHostOrTextFromComponent(component) {
  var rendered;
  while ((rendered = component._renderedComponent)) {
    component = rendered;
  }
  return component;
}
var UNSET = {};
class ReactTestComponent {
  _currentElement: ReactElement;
  _renderedChildren: null | Object;
  _topLevelWrapper: null | ReactInstance;
  _hostContainerInfo: null | Object;
  _nodeMock: Object;
  constructor(element: ReactElement) {
    this._currentElement = element;
    this._renderedChildren = null;
    this._topLevelWrapper = null;
    this._hostContainerInfo = null;
    this._nodeMock = UNSET;
  }
  mountComponent(
    transaction: ReactTestReconcileTransaction,
    nativeParent: null | ReactTestComponent,
    hostContainerInfo: Object,
    context: Object,
  ) {
    var element = this._currentElement;
    this._hostContainerInfo = hostContainerInfo;
    this._nodeMock = hostContainerInfo.createNodeMock(element);
    // $FlowFixMe https://github.com/facebook/flow/issues/1805
    this.mountChildren(element.props.children, transaction, context);
  }
  receiveComponent(
    nextElement: ReactElement,
    transaction: ReactTestReconcileTransaction,
    context: Object,
  ) {
    this._currentElement = nextElement;
    // $FlowFixMe https://github.com/facebook/flow/issues/1805
    this.updateChildren(nextElement.props.children, transaction, context);
  }
  getPublicInstance(): Object {
    invariant(
      this._nodeMock !== UNSET,
      'getPublicInstance should not be called before component is mounted.'
    );
    return this._nodeMock;
  }
  toJSON(): ReactTestRendererJSON {
    // not using `children`, but I don't want to rewrite without destructuring
    // eslint-disable-next-line no-unused-vars
    var {children, ...props} = this._currentElement.props;
    var childrenJSON = [];
    for (var key in this._renderedChildren) {
      var inst = this._renderedChildren[key];
      inst = getRenderedHostOrTextFromComponent(inst);
      var json = inst.toJSON();
      if (json !== undefined) {
        childrenJSON.push(json);
      }
    }
    var object: ReactTestRendererJSON = {
      type: this._currentElement.type,
      props: props,
      children: childrenJSON.length ? childrenJSON : null,
    };
    Object.defineProperty(object, '$$typeof', {
      value: Symbol.for('react.test.json'),
    });
    return object;
  }
  getHostNode(): void {}
  unmountComponent(safely, skipLifecycle): void {
    // $FlowFixMe https://github.com/facebook/flow/issues/1805
    this.unmountChildren(safely, skipLifecycle);
  }
}
Object.assign(ReactTestComponent.prototype, ReactMultiChild);
// =============================================================================
ReactUpdates.injection.injectReconcileTransaction(
  ReactTestReconcileTransaction
);
ReactUpdates.injection.injectBatchingStrategy(ReactDefaultBatchingStrategy);
ReactHostComponent.injection.injectGenericComponentClass(ReactTestComponent);
ReactHostComponent.injection.injectTextComponentClass(ReactTestTextComponent);
ReactEmptyComponent.injection.injectEmptyComponentFactory(function() {
  return new ReactTestEmptyComponent();
});
ReactComponentEnvironment.injection.injectEnvironment({
  processChildrenUpdates: function() {},
  replaceNodeWithMarkup: function() {},
});
var ReactTestRenderer = {
  create: ReactTestMount.render,
  /* eslint-disable camelcase */
  unstable_batchedUpdates: ReactUpdates.batchedUpdates,
  /* eslint-enable camelcase */
};
module.exports = ReactTestRenderer;
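// Illustrative usage only (not part of this module); `MyComponent` is a
// hypothetical element. The test renderer produces a JSON-friendly tree:
//
//   const renderer = ReactTestRenderer.create(<MyComponent />);
//   console.log(renderer.toJSON());
//   // -> {type: 'div', props: {...}, children: [...]}, tagged via
//   //    Symbol.for('react.test.json')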
 
 | 
	facebook_react 
 | 
	2017-01-28 
 | 
	751d22117213ebf425e1a81cc7b2def10e67ae5f 
 | 
					
	scripts/release-manager/cli.js 
 | 137 
							 | 
	#!/usr/bin/env node
'use strict';
const chalk = require('chalk');
const Vorpal = require('vorpal');
const GitHubAPI = require('github-api');
const untildify = require('untildify');
const fs = require('fs');
const path = require('path');
const os = require('os');
const child_process = require('child_process');
const execSync = child_process.execSync;
const vorpal = new Vorpal();
// Expects to be in a checkout of react that is a sibling of the react checkout you want to operate on
// eg ~/code/react@release-manager/scripts/release-manager & ~/code/react
// TODO: Make this an argument to the script
let PATH_TO_REPO = null;
const PATH_TO_CONFIG = path.resolve(os.homedir(), '.react-release-manager.json');
const DEFAULT_CONFIG = {
  githubToken: null,
  reactPath: path.resolve('../../../react'),
};
// Quick dry run opt-in. This allows quick debugging of execInRepo without
// actually running the command, ensuring no accidental publishing.
const DRY_RUN = false;
// Enabled commands
const COMMANDS = [
  'init',
  'docs-prs',
  'q',
  'stable-prs',
  'version',
  'npm-publish',
  'npm-check-access',
  'npm-grant-access',
  'start-release',
];
// HELPERS
// Simple helper to write out some JSON for debugging
function writeTo(file, data) {
  var folder = path.join(__dirname, 'data');
  if (!fs.existsSync(folder)) {
    fs.mkdirSync(folder);
  }
  fs.writeFile(
    path.join(folder, file),
    JSON.stringify(data, null, 2)
  );
}
// Wrapper around exec so we don't have to worry about paths
function execInRepo(command) {
  vorpal.log(chalk.gray(`Executing ${chalk.underline(command)}`));
  if (DRY_RUN) {
    return '';
  }
  return execSync(command, {
    cwd: PATH_TO_REPO,
    encoding: 'utf8',
  }).trim();
}
function getReactVersion() {
  return (JSON.parse(fs.readFileSync(path.join(PATH_TO_REPO, 'package.json'), 'utf8'))).version;
}
const app = {
  vorpal,
  updateConfig() {
    // TODO: write this. This should make it possible to start without a config
    // and go through the init process to create one and then re-init the github
    // setup.
    this.config = this.loadConfig();
  },
  loadConfig() {
    try {
      // TODO: validate config
      let config = JSON.parse(fs.readFileSync(PATH_TO_CONFIG, 'utf8'));
      config.reactPath = path.normalize(untildify(config.reactPath));
      PATH_TO_REPO = config.reactPath;
      return config;
    } catch (e) {
      console.error('Attempt to load config file failed. Please run `init` command for initial setup or make sure ~/.react-release-manager.json is valid JSON. Using a default config which may not work properly.');
      return DEFAULT_CONFIG;
    }
  },
  init() {
    this.config = this.loadConfig();
    this.PATH_TO_CONFIG = PATH_TO_CONFIG;
    // GITHUB
    this.github = new GitHubAPI({
      token: this.config.githubToken,
    });
    this.ghrepo = this.github.getRepo('facebook', 'react');
    this.ghissues = this.github.getIssues('facebook', 'react');
    // HELPERS
    this.writeTo = writeTo;
    this.execInRepo = execInRepo;
    this.getReactVersion = getReactVersion;
    // Register commands
    COMMANDS.forEach((command) => {
      vorpal.use(require(`./commands/${command}`)(vorpal, app));
    });
    var v = vorpal
      .history('react-release-manager')
      .delimiter('rrm \u2234');
    v.exec('help');
    v.show();
  },
};
app.init();
 
 | 
	facebook_react 
 | 
	2017-01-28 
 | 
	751d22117213ebf425e1a81cc7b2def10e67ae5f 
 | 
					
	src/renderers/shared/utils/validateCallback.js 
 | 40 
							 | 
	/**
 * Copyright 2013-present, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree. An additional grant
 * of patent rights can be found in the PATENTS file in the same directory.
 *
 * @providesModule validateCallback
 * @flow
 */
'use strict';
const invariant = require('invariant');
function formatUnexpectedArgument(arg: any) {
  let type = typeof arg;
  if (type !== 'object') {
    return type;
  }
  let displayName = arg.constructor && arg.constructor.name || type;
  let keys = Object.keys(arg);
  if (keys.length > 0 && keys.length < 20) {
    return `${displayName} (keys: ${keys.join(', ')})`;
  }
  return displayName;
}
function validateCallback(callback: ?Function, callerName: string) {
  invariant(
    !callback || typeof callback === 'function',
    '%s(...): Expected the last optional `callback` argument to be a ' +
    'function. Instead received: %s.',
    callerName,
    formatUnexpectedArgument(callback)
  );
}
module.exports = validateCallback;
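// Illustrative usage only (not part of this module); the caller name
// 'setState' is just an example:
//
//   validateCallback(() => {}, 'setState');   // passes silently
//   validateCallback({foo: 1}, 'setState');
//   // throws (in development): "setState(...): Expected the last optional
//   // `callback` argument to be a function. Instead received: Object (keys: foo)."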
 
 | 
	facebook_react 
 | 
	2017-01-28 
 | 
	751d22117213ebf425e1a81cc7b2def10e67ae5f 
 | 
					
	scripts/release-manager/.eslintrc.js 
 | 7 
							 | 
	'use strict';
module.exports = {
  rules: {
    'no-shadow': 0,
  },
};
 
 | 
	facebook_react 
 | 
	2017-01-28 
 | 
	751d22117213ebf425e1a81cc7b2def10e67ae5f 
 | 
					
	fixtures/packaging/webpack/input.js 
 | 16 
							 | 
	var React = require('react');
var CSSTransitionGroup = require('react-addons-css-transition-group');
var ReactDOM = require('react-dom');
ReactDOM.render(
  React.createElement(CSSTransitionGroup, {
    transitionName: 'example',
    transitionAppear: true,
    transitionAppearTimeout: 500,
    transitionEnterTimeout: 0,
    transitionLeaveTimeout: 0,
  }, React.createElement('h1', null,
    'Hello World!'
  )),
  document.getElementById('container')
);
 
 | 
	facebook_react 
 | 
	2017-01-28 
 | 
	751d22117213ebf425e1a81cc7b2def10e67ae5f 
 | 
					
	src/renderers/shared/fiber/ReactFiberErrorLogger.js 
 | 90 
							 | 
	/**
 * Copyright 2013-present, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree. An additional grant
 * of patent rights can be found in the PATENTS file in the same directory.
 *
 * @providesModule ReactFiberErrorLogger
 * @flow
 */
'use strict';
import type { CapturedError } from 'ReactFiberScheduler';
function logCapturedError(capturedError : CapturedError) : void {
  if (__DEV__) {
    const {
      componentName,
      componentStack,
      error,
      errorBoundaryName,
      errorBoundaryFound,
      willRetry,
    } = capturedError;
    const {
      message,
      name,
      stack,
    } = error;
    const errorSummary = message
      ? `${name}: ${message}`
      : name;
    const componentNameMessage = componentName
      ? `React caught an error thrown by ${componentName}.`
      : 'React caught an error thrown by one of your components.';
    // Error stack varies by browser, eg:
    // Chrome prepends the Error name and type.
    // Firefox, Safari, and IE don't indent the stack lines.
    // Format it in a consistent way for error logging.
    let formattedCallStack = stack.slice(0, errorSummary.length) === errorSummary
      ? stack.slice(errorSummary.length)
      : stack;
    formattedCallStack = formattedCallStack
      .trim()
      .split('\n')
      .map((line) => `\n    ${line.trim()}`)
      .join('');
    let errorBoundaryMessage;
    // errorBoundaryFound check is sufficient; errorBoundaryName check is to satisfy Flow.
    if (errorBoundaryFound && errorBoundaryName) {
      if (willRetry) {
        errorBoundaryMessage =
          `React will try to recreate this component tree from scratch ` +
          `using the error boundary you provided, ${errorBoundaryName}.`;
      } else {
        errorBoundaryMessage =
          `This error was initially handled by the error boundary ${errorBoundaryName}. ` +
          `Recreating the tree from scratch failed so React will unmount the tree.`;
      }
    } else {
      // TODO Link to unstable_handleError() documentation once it exists.
      errorBoundaryMessage =
        'Consider adding an error boundary to your tree to customize error handling behavior.';
    }
    console.error(
      `${componentNameMessage} You should fix this error in your code. ${errorBoundaryMessage}\n\n` +
      `${errorSummary}\n\n` +
      `The error is located at: ${componentStack}\n\n` +
      `The error was thrown at: ${formattedCallStack}`
    );
  }
  if (!__DEV__) {
    const { error } = capturedError;
    console.error(
      `React caught an error thrown by one of your components.\n\n${error.stack}`
    );
  }
}
exports.logCapturedError = logCapturedError;
 
 | 
	facebook_react 
 | 
	2017-01-28 
 | 
	751d22117213ebf425e1a81cc7b2def10e67ae5f 
 | 
					
	fixtures/packaging/webpack-alias/input.js 
 | 16 
							 | 
	var React = require('react');
var ReactDOM = require('react-dom');
var CSSTransitionGroup = React.addons.CSSTransitionGroup;
ReactDOM.render(
  React.createElement(CSSTransitionGroup, {
    transitionName: 'example',
    transitionAppear: true,
    transitionAppearTimeout: 500,
    transitionEnterTimeout: 0,
    transitionLeaveTimeout: 0,
  }, React.createElement('h1', null,
    'Hello World!'
  )),
  document.getElementById('container')
);
 
 | 
	facebook_react 
 | 
	2017-01-28 
 | 
	751d22117213ebf425e1a81cc7b2def10e67ae5f 
 | 
					
	src/renderers/shared/fiber/ReactFiberDevToolsHook.js 
 | 74 
							 | 
	/**
 * Copyright 2013-present, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree. An additional grant
 * of patent rights can be found in the PATENTS file in the same directory.
 *
 * @providesModule ReactFiberDevToolsHook
 * @flow
 */
/* globals __REACT_DEVTOOLS_GLOBAL_HOOK__ */
'use strict';
import type { Fiber } from 'ReactFiber';
import type { FiberRoot } from 'ReactFiberRoot';
if (__DEV__) {
  var warning = require('warning');
}
let rendererID = null;
let injectInternals = null;
let onCommitRoot = null;
let onCommitUnmount = null;
if (
  typeof __REACT_DEVTOOLS_GLOBAL_HOOK__ !== 'undefined' &&
  // Check the flag itself; a bare `typeof` here would always be truthy.
  __REACT_DEVTOOLS_GLOBAL_HOOK__.supportsFiber
) {
  let {
    inject,
    onCommitFiberRoot,
    onCommitFiberUnmount,
  } = __REACT_DEVTOOLS_GLOBAL_HOOK__;
  injectInternals = function(internals : Object) {
    warning(rendererID == null, 'Cannot inject into DevTools twice.');
    rendererID = inject(internals);
  };
  onCommitRoot = function(root : FiberRoot) {
    if (rendererID == null) {
      return;
    }
    try {
      onCommitFiberRoot(rendererID, root);
    } catch (err) {
      // Catch all errors because it is unsafe to throw in the commit phase.
      if (__DEV__) {
        warning(false, 'React DevTools encountered an error: %s', err);
      }
    }
  };
  onCommitUnmount = function(fiber : Fiber) {
    if (rendererID == null) {
      return;
    }
    try {
      onCommitFiberUnmount(rendererID, fiber);
    } catch (err) {
      // Catch all errors because it is unsafe to throw in the commit phase.
      if (__DEV__) {
        warning(false, 'React DevTools encountered an error: %s', err);
      }
    }
  };
}
exports.injectInternals = injectInternals;
exports.onCommitRoot = onCommitRoot;
exports.onCommitUnmount = onCommitUnmount;
 
 | 
	facebook_react 
 | 
	2017-01-28 
 | 
	751d22117213ebf425e1a81cc7b2def10e67ae5f 
 | 
					
	scripts/release-manager/commands/utils/git.js 
 | 93 
							 | 
	'use strict';
function isClean(app) {
  return getStatus(app) === '';
}
function getStatus(app) {
  return app.execInRepo(`git status --untracked-files=no --porcelain`);
}
function getBranch(app) {
  return app.execInRepo(`git symbolic-ref HEAD`);
}
function fetch(app, remote) {
  return app.execInRepo(`git fetch ${remote}`);
}
function checkout(app, ref) {
  return app.execInRepo(`git checkout ${ref}`);
}
function pull(app, ref) {
  ref = ref || '';
  return app.execInRepo(`git pull ${ref}`);
}
function merge(app, ref, ff, msg) {
  let opts = [
    ff ? '--ff-only' : '--no-ff',
  ];
  if (!msg) {
    opts.push('--no-edit');
  } else {
    opts.push(`-m '${msg}'`);
  }
  return app.execInRepo(`git merge ${opts.join(' ')} ${ref}`);
}
function tag(app, tag, ref) {
  ref = ref || '';
  return app.execInRepo(`git tag ${tag} ${ref}`);
}
function commit(app, msg, all) {
  return app.execInRepo(`git commit -m '${msg}' ${all ? '-a' : ''}`);
}
function push(app, remote, refspec, tags) {
  let opts = [
    remote,
    refspec,
    tags ? '--tags' : '',
  ];
  return app.execInRepo(`git push ${opts.join(' ')}`);
}
/**
 * Cherry picks a single sha to the given branch. Very crude, but establishes
 * some API. We don't know if the sha is a merge or a squashed commit so just
 * try both.
 *
 * Assume we're already on the right branch.
 */
function cherryPickMerge(app, ref) {
  // console.log(`cherry picking ${sha}`)
  // git cherry-pick -x sha || git cherry-pick -x -m1 sha
  try {
    app.execInRepo(`git cherry-pick -x ${ref}`);
  } catch (e) {
    // Assume for now this just means it was actually a merge.
    // TODO: gracefully handle other cases, like possibility the commit was
    // already cherry-picked and should be skipped.
    app.execInRepo(`git cherry-pick -x -m1 ${ref}`);
  }
}
module.exports = {
  getBranch,
  getStatus,
  isClean,
  commit,
  checkout,
  fetch,
  pull,
  push,
  merge,
  tag,
  cherryPickMerge,
};
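// Illustrative usage only (assumes the `app` helper object that cli.js passes
// to each command; the branch name and `pr` object are hypothetical):
//
//   const git = require('./utils/git');
//   if (git.isClean(app)) {
//     git.checkout(app, '15-stable');
//     git.cherryPickMerge(app, pr.merge_commit_sha);
//     git.push(app, 'origin', '15-stable', true);
//   }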
 
 | 
	facebook_react 
 | 
	2017-01-28 
 | 
	751d22117213ebf425e1a81cc7b2def10e67ae5f 
 | 
					
	fixtures/packaging/systemjs-builder/config.js 
 | 6 
							 | 
	System.config({
  paths: {
    react: '../../../build/react-with-addons.js',
    'react-dom': '../../../build/react-dom.js',
  },
});
 
 | 
	facebook_react 
 | 
	2017-01-28 
 | 
	751d22117213ebf425e1a81cc7b2def10e67ae5f 
 | 
					
	fixtures/dom/src/style.css 
 | 86 
							 | 
	*,
*:before,
*:after {
  box-sizing: border-box;
}
body {
  font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", "Roboto", "Oxygen", "Ubuntu", "Cantarell", "Fira Sans", "Droid Sans", "Helvetica Neue", sans-serif;
  margin: 0;
  padding: 0;
}
select {
  width: 120px;
}
.header {
  background: #222;
  box-shadow: inset 0 -1px 3px #000;
  line-height: 32px;
  overflow: hidden;
  padding: 8px 16px;
}
.header__inner {
  display: table;
  margin: 0 auto;
  max-width: 1000px;
  overflow: hidden;
  text-align: center;
  width: 100%;
}
.header__logo {
  color: #efefef;
  display: table-cell;
  vertical-align: middle;
  white-space: nowrap;
}
.header__logo img {
  display: inline-block;
  margin-right: 8px;
  vertical-align: middle;
}
.header-controls {
  display: table-cell;
  text-align: right;
  vertical-align: middle;
  width: 100%;
}
.sr-only {
  clip: rect(0, 0, 0, 0);
  height: 0;
  margin: -1px;
  position: absolute;
  width: 0;
}
.container {
  margin: 0 auto;
  max-width: 900px;
  overflow: hidden;
  padding: 20px;
}
label {
  display: block;
  font-size: 12px;
  letter-spacing: 0.01em;
  margin-bottom: 4px;
  text-transform: uppercase;
}
.field {
  padding: 8px;
}
fieldset {
  border: 1px solid #aaa;
  float: left;
  padding: 16px;
  width: 49%;
}
 
 | 
	facebook_react 
 | 
	2017-01-28 
 | 
	751d22117213ebf425e1a81cc7b2def10e67ae5f 
 | 
					
	fixtures/packaging/rjs/input.js 
 | 15 
							 | 
	require(['react', 'react-dom'], function(React, ReactDOM) {
  var CSSTransitionGroup = React.addons.CSSTransitionGroup;
  ReactDOM.render(
    React.createElement(CSSTransitionGroup, {
      transitionName: 'example',
      transitionAppear: true,
      transitionAppearTimeout: 500,
      transitionEnterTimeout: 0,
      transitionLeaveTimeout: 0,
    }, React.createElement('h1', null,
      'Hello World!'
    )),
    document.getElementById('container')
  );
});
 
 | 
	facebook_react 
 | 
	2017-01-28 
 | 
	751d22117213ebf425e1a81cc7b2def10e67ae5f 
 | 
					
	fixtures/dom/src/index.js 
 | 8 
							 | 
	const React = window.React;
const ReactDOM = window.ReactDOM;
import App from './components/App';
ReactDOM.render(
  <App />,
  document.getElementById('root')
);
 
 | 
	facebook_react 
 | 
	2017-01-28 
 | 
	751d22117213ebf425e1a81cc7b2def10e67ae5f 
 | 
					
	packages/react-test-renderer/fiber.js 
 | 3 
							 | 
	'use strict';
module.exports = require('./lib/ReactTestRendererFiber');
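// Illustrative usage only (not part of this file): consumers could opt into the
// Fiber-based test renderer through this package entry point, e.g.
//   const ReactTestRenderer = require('react-test-renderer/fiber');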
 
 | 
	facebook_react 
 | 
	2017-01-28 
 | 
	751d22117213ebf425e1a81cc7b2def10e67ae5f 
 | 
					
	src/renderers/shared/fiber/__tests__/ReactCoroutine-test.js 
 | 126 
							 | 
	/**
 * Copyright 2013-present, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree. An additional grant
 * of patent rights can be found in the PATENTS file in the same directory.
 *
 * @emails react-core
 */
'use strict';
var React;
var ReactNoop;
var ReactCoroutine;
var ReactFeatureFlags;
describe('ReactCoroutine', () => {
  beforeEach(() => {
    jest.resetModules();
    React = require('React');
    ReactNoop = require('ReactNoop');
    ReactCoroutine = require('ReactCoroutine');
    ReactFeatureFlags = require('ReactFeatureFlags');
    ReactFeatureFlags.disableNewFiberFeatures = false;
  });
  function div(...children) {
    children = children.map(c => typeof c === 'string' ? { text: c } : c);
    return { type: 'div', children, prop: undefined };
  }
  function span(prop) {
    return { type: 'span', children: [], prop };
  }
  it('should render a coroutine', () => {
    var ops = [];
    function Continuation({ isSame }) {
      ops.push(['Continuation', isSame]);
      return <span prop={isSame ? 'foo==bar' : 'foo!=bar'} />;
    }
    // An alternative API could mark Continuation as something that needs
    // yielding. E.g. Continuation.yieldType = 123;
    function Child({ bar }) {
      ops.push(['Child', bar]);
      return ReactCoroutine.createYield({
        props: {
          bar: bar,
        },
        continuation: Continuation,
      });
    }
    function Indirection() {
      ops.push('Indirection');
      return [<Child bar={true} />, <Child bar={false} />];
    }
    function HandleYields(props, yields) {
      ops.push('HandleYields');
      return yields.map(y =>
        <y.continuation isSame={props.foo === y.props.bar} />
      );
    }
    // An alternative API could mark Parent as something that needs
    // yielding. E.g. Parent.handler = HandleYields;
    function Parent(props) {
      ops.push('Parent');
      return ReactCoroutine.createCoroutine(
        props.children,
        HandleYields,
        props
      );
    }
    function App() {
      return <div><Parent foo={true}><Indirection /></Parent></div>;
    }
    ReactNoop.render(<App />);
    ReactNoop.flush();
    expect(ops).toEqual([
      'Parent',
      'Indirection',
      ['Child', true],
      // Yield
      ['Child', false],
      // Yield
      'HandleYields',
      // Continue yields
      ['Continuation', true],
      ['Continuation', false],
    ]);
    expect(ReactNoop.getChildren()).toEqual([
      div(
        span('foo==bar'),
        span('foo!=bar'),
      ),
    ]);
  });
  it('should update a coroutine', () => {
    function Continuation({ isSame }) {
      return <span prop={isSame ? 'foo==bar' : 'foo!=bar'} />;
    }
    function Child({ bar }) {
      return ReactCoroutine.createYield({
        props: {
          bar: bar,
        },
        continuation: Continuation,
      });
    }
    function Indirection() {
      return [<Child bar={true} />, <Child bar={false} />];
    }
    function HandleYields(props, yields) {
      return yields.map(y =>
        <y.continuation isSame={props.foo === y.props.bar} />
      );
    }
    function Parent(props) {
      return ReactCoroutine.createCoroutine(
        props.children,
        HandleYields,
        props
      );
    }
    function App(props) {
      return <div><Parent foo={props.foo}><Indirection /></Parent></div>;
    }
    ReactNoop.render(<App foo={true} />);
    ReactNoop.flush();
    expect(ReactNoop.getChildren()).toEqual([
      div(
        span('foo==bar'),
        span('foo!=bar'),
      ),
    ]);
    ReactNoop.render(<App foo={false} />);
    ReactNoop.flush();
    expect(ReactNoop.getChildren()).toEqual([
      div(
        span('foo!=bar'),
        span('foo==bar'),
      ),
    ]);
  });
  it('should unmount a composite in a coroutine', () => {
    var ops = [];
    class Continuation extends React.Component {
      render() {
        ops.push('Continuation');
        return <div />;
      }
      componentWillUnmount() {
        ops.push('Unmount Continuation');
      }
    }
    class Child extends React.Component {
      render() {
        ops.push('Child');
        return ReactCoroutine.createYield(Continuation);
      }
      componentWillUnmount() {
        ops.push('Unmount Child');
      }
    }
    function HandleYields(props, yields) {
      ops.push('HandleYields');
      return yields.map(ContinuationComponent => <ContinuationComponent />);
    }
    class Parent extends React.Component {
      render() {
        ops.push('Parent');
        return ReactCoroutine.createCoroutine(
          this.props.children,
          HandleYields,
          this.props
        );
      }
      componentWillUnmount() {
        ops.push('Unmount Parent');
      }
    }
    ReactNoop.render(<Parent><Child /></Parent>);
    ReactNoop.flush();
    expect(ops).toEqual([
      'Parent',
      'Child',
      'HandleYields',
      'Continuation',
    ]);
    ops = [];
    ReactNoop.render(<div />);
    ReactNoop.flush();
    expect(ops).toEqual([
      'Unmount Parent',
      'Unmount Child',
      'Unmount Continuation',
    ]);
  });
  it('should handle deep updates in coroutine', () => {
    let instances = {};
    class Counter extends React.Component {
      state = {value: 5};
      render() {
        instances[this.props.id] = this;
        return ReactCoroutine.createYield(this.state.value);
      }
    }
    function App(props) {
      return ReactCoroutine.createCoroutine(
        [
          <Counter id="a" />,
          <Counter id="b" />,
          <Counter id="c" />,
        ],
        (p, yields) => yields.map(y => <span prop={y * 100} />),
        {}
      );
    }
    ReactNoop.render(<App />);
    ReactNoop.flush();
    expect(ReactNoop.getChildren()).toEqual([
      span(500),
      span(500),
      span(500),
    ]);
    instances.a.setState({value: 1});
    instances.b.setState({value: 2});
    ReactNoop.flush();
    expect(ReactNoop.getChildren()).toEqual([
      span(100),
      span(200),
      span(500),
    ]);
  });
});
 
 | 
	facebook_react 
 | 
	2017-01-28 
 | 
	751d22117213ebf425e1a81cc7b2def10e67ae5f 
 | 
					
	scripts/release-manager/commands/npm-publish.js 
 | 74 
							 | 
	
// Publishes the built npm packages from build/packages
// 1. Show checklist (automate later)
// 2. Prompt to ensure build is complete
// 3. Prompt for dist-tag?
'use strict';
const path = require('path');
const semver = require('semver');
const glob = require('glob');
module.exports = function(vorpal, app) {
  vorpal
    .command('npm-publish')
    .description('After you\'ve run grunt release, publishes the npm packages')
    .action(function(args) {
      return new Promise((resolve, reject) => {
        const currentVersion = app.getReactVersion();
        const isStable = semver.prerelease(currentVersion) === null;
        this.log(`Preparing to publish v${currentVersion}…`);
        if (isStable) {
          this.log(`"latest" dist-tag will be added to this version`);
        }
        // TODO: show checklist
        this.prompt([
          {
            type: 'confirm',
            message: 'Did you run `grunt build` or `grunt release` and bump the version number?',
            default: false,
            name: 'checklist',
          },
        ]).then((answers) => {
          if (!answers.checklist) {
            return reject('Complete the build process first');
          }
          // We'll grab all the tarballs and publish those directly. This
          // is how we've historically done it, though in the past it was
          // just npm publish pkg1.tgz && npm publish pkg2.tgz. This
          // avoided the need to cd and publish.
          const tgz = glob.sync('build/packages/*.tgz', {
            cwd: app.config.reactPath,
          });
          // Just in case they didn't actually prep this.
          // TODO: verify packages?
          if (tgz.length === 0) {
            reject('No built packages found');
          }
          // TODO: track success
          tgz.forEach((file) => {
            this.log(app.execInRepo(`npm publish ${file} --tag=next`));
          });
          if (isStable) {
            tgz.forEach((file) => {
              const pkg = path.parse(file).name;
              this.log(app.execInRepo(`npm dist-tag add ${pkg}@${currentVersion} latest`));
            });
          }
          resolve();
        });
      });
    });
};
 
 | 
	facebook_react 
 | 
	2017-01-28 
 | 
	751d22117213ebf425e1a81cc7b2def10e67ae5f 
 | 
					
	fixtures/packaging/systemjs-builder/input.js 
 | 16 
							 | 
	import React from 'react';
import ReactDOM from 'react-dom';
var CSSTransitionGroup = React.addons.CSSTransitionGroup;
ReactDOM.render(
  React.createElement(CSSTransitionGroup, {
    transitionName: 'example',
    transitionAppear: true,
    transitionAppearTimeout: 500,
    transitionEnterTimeout: 0,
    transitionLeaveTimeout: 0,
  }, React.createElement('h1', null,
    'Hello World!'
  )),
  document.getElementById('container')
);
 
 | 
	facebook_react 
 | 
	2017-01-28 
 | 
	751d22117213ebf425e1a81cc7b2def10e67ae5f 
 | 
					
	fixtures/packaging/systemjs-builder/build.js 
 | 12 
							 | 
	var Builder = require('systemjs-builder');
var builder = new Builder('/', './config.js');
builder
  .buildStatic('./input.js', './output.js')
  .then(function() {
    console.log('Build complete');
  })
  .catch(function(err) {
    console.log('Build error');
    console.log(err);
  });
 
 | 
	facebook_react 
 | 
	2017-01-28 
 | 
	751d22117213ebf425e1a81cc7b2def10e67ae5f 
 | 
					
	fixtures/packaging/brunch/input.js 
 | 16 
							 | 
	var React = require('react');
var CSSTransitionGroup = require('react-addons-css-transition-group');
var ReactDOM = require('react-dom');
ReactDOM.render(
  React.createElement(CSSTransitionGroup, {
    transitionName: 'example',
    transitionAppear: true,
    transitionAppearTimeout: 500,
    transitionEnterTimeout: 0,
    transitionLeaveTimeout: 0,
  }, React.createElement('h1', null,
    'Hello World!'
  )),
  document.getElementById('container')
);
 
 | 
	facebook_react 
 | 
	2017-01-28 
 | 
	751d22117213ebf425e1a81cc7b2def10e67ae5f 
 | 
					
	fixtures/dom/src/components/Header.js 
 | 68 
							 | 
	import { parse, stringify } from 'query-string';
const React = window.React;
const Header = React.createClass({
  getInitialState() {
    const query = parse(window.location.search);
    const version = query.version || 'local';
    const versions = [version];
    return { version, versions };
  },
  componentWillMount() {
    fetch('https://api.github.com/repos/facebook/react/tags', { mode: 'cors' })
      .then(res => res.json())
      .then(tags => {
        let versions = tags.map(tag => tag.name.slice(1));
        versions = ['local', ...versions];
        this.setState({ versions });
      });
  },
  handleVersionChange(event) {
    const query = parse(window.location.search);
    query.version = event.target.value;
    if (query.version === 'local') {
      delete query.version;
    }
    window.location.search = stringify(query);
  },
  handleFixtureChange(event) {
    window.location.pathname = event.target.value;
  },
  render() {
    return (
    <header className="header">
      <div className="header__inner">
        <span className="header__logo">
          <img src="https://facebook.github.io/react/img/logo.svg" alt="" width="32" height="32" />
          React Sandbox (v{React.version})
        </span>
        <div className="header-controls">
          <label htmlFor="example">
            <span className="sr-only">Select an example</span>
            <select value={window.location.pathname} onChange={this.handleFixtureChange}>
              <option value="/">Select a Fixture</option>
              <option value="/range-inputs">Range Inputs</option>
              <option value="/text-inputs">Text Inputs</option>
              <option value="/selects">Selects</option>
              <option value="/textareas">Textareas</option>
            </select>
          </label>
          <label htmlFor="react_version">
            <span className="sr-only">Select a version to test</span>
            <select
              value={this.state.version}
              onChange={this.handleVersionChange}>
                {this.state.versions.map(version => (
                  <option key={version} value={version}>{version}</option>
                ))}
            </select>
          </label>
        </div>
      </div>
    </header>
    );
  },
});
export default Header;
 
 | 
	facebook_react 
 | 
	2017-01-28 
 | 
	751d22117213ebf425e1a81cc7b2def10e67ae5f 
 | 
					
	src/renderers/shared/stack/reconciler/ReactCompositeComponentTypes.js 
 | 19 
							 | 
	/**
 * Copyright 2013-present, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree. An additional grant
 * of patent rights can be found in the PATENTS file in the same directory.
 *
 * @providesModule ReactCompositeComponentTypes
 * @flow
 */
export type CompositeComponentTypes = 0 | 1 | 2;
module.exports = {
  ImpureClass: 0,
  PureClass: 1,
  StatelessFunctional: 2,
};
 
 | 
	facebook_react 
 | 
	2017-01-28 
 | 
	751d22117213ebf425e1a81cc7b2def10e67ae5f 
 | 
					
	fixtures/dom/src/components/App.js 
 | 20 
							 | 
	const React = window.React;
import Header from './Header';
import Fixtures from './fixtures';
import '../style.css';
const App = React.createClass({
  render() {
    return (
      <div>
        <Header />
        <div className="container" >
          <Fixtures />
        </div>
      </div>
    );
  },
});
export default App;
 
 | 
	facebook_react 
 | 
	2017-01-28 
 | 
	751d22117213ebf425e1a81cc7b2def10e67ae5f 
 | 
					
	fixtures/dom/src/components/fixtures/text-inputs/index.js 
 | 66 
							 | 
	const React = window.React;
const TextInputFixtures = React.createClass({
  getInitialState() {
    return {
      color: '#ffaaee',
    };
  },
  renderControlled(type) {
    let id = `controlled_${type}`;
    let onChange = e => {
      let value = e.target.value;
      if (type === 'number') {
        value = value === '' ? '' : parseFloat(value) || 0;
      }
      this.setState({
        [type] : value,
      });
    };
    let state = this.state[type] || '';
    return (
      <div key={type} className="field">
        <label htmlFor={id}>{type}</label>
        <input id={id} type={type} value={state} onChange={onChange} />
          → {JSON.stringify(state)}
      </div>
    );
  },
  renderUncontrolled(type) {
    let id = `uncontrolled_${type}`;
    return (
      <div key={type} className="field">
        <label htmlFor={id}>{type}</label>
        <input id={id} type={type} />
      </div>
    );
  },
  render() {
    // https://developer.mozilla.org/en-US/docs/Web/HTML/Element/input
    let types = [
      'text', 'email', 'number', 'url', 'tel',
      'color', 'date', 'datetime-local',
      'time', 'month', 'week', 'range', 'password',
    ];
    return (
      <form onSubmit={event => event.preventDefault()}>
        <fieldset>
          <legend>Controlled</legend>
          {types.map(this.renderControlled)}
        </fieldset>
        <fieldset>
          <legend>Uncontrolled</legend>
          {types.map(this.renderUncontrolled)}
        </fieldset>
      </form>
    );
  },
});
module.exports = TextInputFixtures;
 
 | 
	facebook_react 
 | 
	2017-01-28 
 | 
	751d22117213ebf425e1a81cc7b2def10e67ae5f 
 | 
					
	fixtures/packaging/rjs/config.js 
 | 10 
							 | 
	module.exports = {
  baseUrl: '.',
  name: 'input',
  out: 'output.js',
  optimize: 'none',
  paths: {
    react: '../../../build/react-with-addons',
    'react-dom': '../../../build/react-dom',
  },
};
 
 | 
	facebook_react 
 | 
	2017-01-28 
 | 
	751d22117213ebf425e1a81cc7b2def10e67ae5f 
 | 
					
	scripts/release-manager/commands/init.js 
 | 70 
							 | 
	/**
 * Command to init a project. This will create the .config.json file if it
 * doesn't already exist.
 */
'use strict';
const chalk = require('chalk');
const fs = require('fs');
const path = require('path');
const untildify = require('untildify');
module.exports = function(vorpal, app) {
  vorpal
    .command('init')
    .description('Initializes a .config.json file for use')
    .action(function(args) {
      return new Promise((resolve, reject) => {
        fs.stat(app.PATH_TO_CONFIG, (err, stats) => {
          if (stats) {
            this.log('Config file exists, nothing to do.');
            reject();
            return;
          }
          this.prompt([
            {
              name: 'githubToken',
              type: 'input',
              message: `${chalk.bold('GitHub token?')} ${chalk.grey('(needs "repo" privs)')} `,
            },
            {
              name: 'reactPath',
              type: 'input',
              message: `${chalk.bold('Location of local React checkout?')} `,
              validate: (input) => {
                let npath = path.normalize(untildify(input));
                if (npath === '.') {
                  return 'Cannot be `.`';
                }
                let stats;
                try {
                  stats = fs.statSync(npath);
                } catch (e) {
                  return `Error: ${e}`;
                }
                if (!stats.isDirectory()) {
                  return `${npath} is not a directory.`;
                }
                // TODO: Look for markers indicating this is a React checkout.
                return true;
              },
            },
          ]).then((answers) => {
            fs.writeFile(app.PATH_TO_CONFIG, JSON.stringify(answers, null, 2), (err) => {
              if (err) {
                this.log('Error writing config file.', err);
                reject();
                return;
              }
              resolve();
            });
          });
        });
      });
    });
};
 
 | 
	facebook_react 
 | 
	2017-01-28 
 | 
	751d22117213ebf425e1a81cc7b2def10e67ae5f 
 | 
					
	fixtures/packaging/brunch/app/initialize.js 
 | 16 
							 | 
	var React = require('react');
var CSSTransitionGroup = require('react-addons-css-transition-group');
var ReactDOM = require('react-dom');
ReactDOM.render(
  React.createElement(CSSTransitionGroup, {
    transitionName: 'example',
    transitionAppear: true,
    transitionAppearTimeout: 500,
    transitionEnterTimeout: 0,
    transitionLeaveTimeout: 0,
  }, React.createElement('h1', null,
    'Hello World!'
  )),
  document.getElementById('container')
);
 
 | 
	facebook_react 
 | 
	2017-01-28 
 | 
	751d22117213ebf425e1a81cc7b2def10e67ae5f 
 | 
					
	fixtures/packaging/webpack/config.js 
 | 11 
							 | 
	var path = require('path');
module.exports = {
  entry: './input',
  output: {
    filename: 'output.js',
  },
  resolve: {
    root: path.resolve('../../../build/packages/'),
  },
};
 
 | 
	facebook_react 
 | 
	2017-01-28 
 | 
	751d22117213ebf425e1a81cc7b2def10e67ae5f 
 | 
					
	fixtures/packaging/brunch/config.js 
 | 10 
							 | 
	exports.config = {
  paths: {
    public: '.',
  },
  files: {
    javascripts: {
      joinTo: 'output.js',
    },
  },
};
 
 | 
	facebook_react 
 | 
	2017-01-28 
 | 
	751d22117213ebf425e1a81cc7b2def10e67ae5f 
 | 
					
	fixtures/dom/src/components/fixtures/range-inputs/index.js 
 | 28 
							 | 
	const React = window.React;
const RangeInputs = React.createClass({
  getInitialState() {
    return { value: 0.5 };
  },
  onChange(event) {
    this.setState({ value: event.target.value });
  },
  render() {
    return (
      <form>
        <fieldset>
          <legend>Controlled</legend>
          <input type="range" value={this.state.value} onChange={this.onChange} />
          <span className="hint">Value: {this.state.value}</span>
        </fieldset>
        <fieldset>
          <legend>Uncontrolled</legend>
          <input type="range" defaultValue={0.5} />
        </fieldset>
      </form>
    );
  },
});
export default RangeInputs;
 
 | 
	facebook_react 
 | 
	2017-01-28 
 | 
	751d22117213ebf425e1a81cc7b2def10e67ae5f 
 | 
					
	scripts/release-manager/commands/utils/npm.js 
 | 49 
							 | 
	'use strict';
const querystring = require('querystring');
const PACKAGES = [
  'react-addons-create-fragment',
  'react-addons-css-transition-group',
  'react-addons-linked-state-mixin',
  'react-addons-perf',
  'react-addons-pure-render-mixin',
  'react-addons-shallow-compare',
  'react-addons-test-utils',
  'react-addons-transition-group',
  'react-addons-update',
  'react-dom',
  'react-native-renderer',
  'react-test-renderer',
  'react',
];
function whoami(app) {
  return app.execInRepo('npm whoami');
}
function packagesNeedingAccess(app, username) {
  let packages = JSON.parse(app.execInRepo(`npm access ls-packages ${username}`));
  return PACKAGES.filter((pkg) => packages[pkg] !== 'read-write');
}
function generateAccessNeededIssue(username, packages) {
  let data = {
    title: `npm access request: ${username}`,
    body: `In order to publish React to npm I need access to the following repositories:
${packages.map((pkg) => `- [${pkg}](https://npm.im/${pkg})`).join('\n')}`,
  };
  return `https://github.com/facebook/react/issues/new?${querystring.stringify(data)}`;
}
function grantAccess(app, username, packages) {
  packages.forEach((pkg) => {
    app.execInRepo(`npm owner add ${username} ${pkg}`);
  });
}
module.exports.PACKAGES = PACKAGES;
module.exports.whoami = whoami;
module.exports.packagesNeedingAccess = packagesNeedingAccess;
module.exports.generateAccessNeededIssue = generateAccessNeededIssue;
module.exports.grantAccess = grantAccess;
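// Illustrative usage only (assumes the same `app` helper used by the
// release-manager commands); not part of this module:
//
//   const npmUtils = require('./utils/npm');
//   const username = npmUtils.whoami(app);
//   const missing = npmUtils.packagesNeedingAccess(app, username);
//   if (missing.length) {
//     console.log(npmUtils.generateAccessNeededIssue(username, missing));
//   }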
 
 | 
	facebook_react 
 | 
	2017-01-28 
 | 
	751d22117213ebf425e1a81cc7b2def10e67ae5f 
 | 
					
	fixtures/packaging/browserify/input.js 
 | 16 
							 | 
	var React = require('react');
var CSSTransitionGroup = require('react-addons-css-transition-group');
var ReactDOM = require('react-dom');
ReactDOM.render(
  React.createElement(CSSTransitionGroup, {
    transitionName: 'example',
    transitionAppear: true,
    transitionAppearTimeout: 500,
    transitionEnterTimeout: 0,
    transitionLeaveTimeout: 0,
  }, React.createElement('h1', null,
    'Hello World!'
  )),
  document.getElementById('container')
);
 
 | 
	facebook_react 
 | 
	2017-01-28 
 | 
	751d22117213ebf425e1a81cc7b2def10e67ae5f 
 | 
					
	hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerSurgicalPreemption.java 
 | 570 
							 | 
	/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.ResourceRequest;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.resourcemanager.MockAM;
import org.apache.hadoop.yarn.server.resourcemanager.MockNM;
import org.apache.hadoop.yarn.server.resourcemanager.MockRM;
import org.apache.hadoop.yarn.server.resourcemanager.monitor.SchedulingEditPolicy;
import org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.ProportionalCapacityPreemptionPolicy;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent;
import org.apache.hadoop.yarn.util.resource.Resources;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Ignore;
import org.junit.Test;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Set;
public class TestCapacitySchedulerSurgicalPreemption
    extends CapacitySchedulerPreemptionTestBase {
  @Override
  @Before
  public void setUp() throws Exception {
    super.setUp();
    conf.setBoolean(
        CapacitySchedulerConfiguration.PREEMPTION_SELECT_CANDIDATES_FOR_RESERVED_CONTAINERS,
        true);
  }
  @Test(timeout = 60000)
  public void testSimpleSurgicalPreemption()
      throws Exception {
    /**
     * Test case: Submit two applications (app1/app2) to different queues, queue
     * structure:
     *
     * <pre>
     *             Root
     *            /  |  \
     *           a   b   c
     *          10   20  70
     * </pre>
     *
     * 1) Two nodes (n1/n2) in the cluster, each of them has 20G.
     *
     * 2) app1 submit to queue-a first, it asked 32 * 1G containers
     * We will allocate 16 on n1 and 16 on n2.
     *
     * 3) app2 submit to queue-c, ask for one 1G container (for AM)
     *
     * 4) app2 asks for another 6G container, it will be reserved on n1
     *
     * Now: we have:
     * n1: 17 from app1, 1 from app2, and 1 reserved from app2
     * n2: 16 from app1.
     *
     * After preemption, we should expect:
     * Preempt 4 containers from app1 on n1.
     */
    MockRM rm1 = new MockRM(conf);
    rm1.getRMContext().setNodeLabelManager(mgr);
    rm1.start();
    MockNM nm1 = rm1.registerNode("h1:1234", 20 * GB);
    MockNM nm2 = rm1.registerNode("h2:1234", 20 * GB);
    CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler();
    RMNode rmNode1 = rm1.getRMContext().getRMNodes().get(nm1.getNodeId());
    RMNode rmNode2 = rm1.getRMContext().getRMNodes().get(nm2.getNodeId());
    // launch an app to queue, AM container should be launched in nm1
    RMApp app1 = rm1.submitApp(1 * GB, "app", "user", null, "a");
    MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1);
    am1.allocate("*", 1 * GB, 32, new ArrayList<ContainerId>());
    // Do allocation for node1/node2
    for (int i = 0; i < 32; i++) {
      cs.handle(new NodeUpdateSchedulerEvent(rmNode1));
      cs.handle(new NodeUpdateSchedulerEvent(rmNode2));
    }
    // App1 should have 33 containers now
    FiCaSchedulerApp schedulerApp1 = cs.getApplicationAttempt(
        am1.getApplicationAttemptId());
    Assert.assertEquals(33, schedulerApp1.getLiveContainers().size());
    // 17 from n1 and 16 from n2
    waitNumberOfLiveContainersOnNodeFromApp(cs.getNode(rmNode1.getNodeID()),
        am1.getApplicationAttemptId(), 17);
    waitNumberOfLiveContainersOnNodeFromApp(cs.getNode(rmNode2.getNodeID()),
        am1.getApplicationAttemptId(), 16);
    // Submit app2 to queue-c and asks for a 1G container for AM
    RMApp app2 = rm1.submitApp(1 * GB, "app", "user", null, "c");
    MockAM am2 = MockRM.launchAndRegisterAM(app2, rm1, nm1);
    // NM1/NM2 has available resource = 2G/4G
    Assert.assertEquals(2 * GB, cs.getNode(nm1.getNodeId())
        .getUnallocatedResource().getMemorySize());
    Assert.assertEquals(4 * GB, cs.getNode(nm2.getNodeId())
        .getUnallocatedResource().getMemorySize());
    // am2 asks for a 6 * GB container
    am2.allocate(Arrays.asList(ResourceRequest
        .newInstance(Priority.newInstance(1), ResourceRequest.ANY,
            Resources.createResource(6 * GB), 1)), null);
    // Call allocation once on n1, we should expect the container reserved on n1
    cs.handle(new NodeUpdateSchedulerEvent(rmNode1));
    Assert.assertNotNull(cs.getNode(nm1.getNodeId()).getReservedContainer());
    // Get edit policy and do one update
    SchedulingEditPolicy editPolicy = getSchedulingEditPolicy(rm1);
    // Call edit schedule twice, and check if 4 containers from app1 at n1 killed
    editPolicy.editSchedule();
    editPolicy.editSchedule();
    waitNumberOfLiveContainersFromApp(schedulerApp1, 29);
    // 13 from n1 (4 preempted) and 16 from n2
    waitNumberOfLiveContainersOnNodeFromApp(cs.getNode(rmNode1.getNodeID()),
        am1.getApplicationAttemptId(), 13);
    waitNumberOfLiveContainersOnNodeFromApp(cs.getNode(rmNode2.getNodeID()),
        am1.getApplicationAttemptId(), 16);
    rm1.close();
  }
  @Test(timeout = 60000)
  public void testSurgicalPreemptionWithAvailableResource()
      throws Exception {
    /**
     * Test case: Submit two applications (app1/app2) to different queues, queue
     * structure:
     *
     * <pre>
     *             Root
     *            /  |  \
     *           a   b   c
     *          10   20  70
     * </pre>
     *
     * 1) Two nodes (n1/n2) in the cluster, each of them has 20G.
     *
     * 2) app1 submit to queue-a first, it asked 38 * 1G containers
     * We will allocate 20 on n1 and 19 on n2.
     *
     * 3) app2 submit to queue-c, ask for one 4G container (for AM)
     *
     * After preemption, we should expect:
     * Preempt 3 containers from app1 and AM of app2 successfully allocated.
     */
    MockRM rm1 = new MockRM(conf);
    rm1.getRMContext().setNodeLabelManager(mgr);
    rm1.start();
    MockNM nm1 = rm1.registerNode("h1:1234", 20 * GB);
    MockNM nm2 = rm1.registerNode("h2:1234", 20 * GB);
    CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler();
    RMNode rmNode1 = rm1.getRMContext().getRMNodes().get(nm1.getNodeId());
    RMNode rmNode2 = rm1.getRMContext().getRMNodes().get(nm2.getNodeId());
    // launch an app to queue, AM container should be launched in nm1
    RMApp app1 = rm1.submitApp(1 * GB, "app", "user", null, "a");
    MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1);
    am1.allocate("*", 1 * GB, 38, new ArrayList<ContainerId>());
    // Do allocation for node1/node2
    for (int i = 0; i < 38; i++) {
      cs.handle(new NodeUpdateSchedulerEvent(rmNode1));
      cs.handle(new NodeUpdateSchedulerEvent(rmNode2));
    }
    // App1 should have 39 containers now (38 allocated + AM)
    FiCaSchedulerApp schedulerApp1 = cs.getApplicationAttempt(
        am1.getApplicationAttemptId());
    Assert.assertEquals(39, schedulerApp1.getLiveContainers().size());
    // 20 from n1 and 19 from n2
    waitNumberOfLiveContainersOnNodeFromApp(cs.getNode(rmNode1.getNodeID()),
        am1.getApplicationAttemptId(), 20);
    waitNumberOfLiveContainersOnNodeFromApp(cs.getNode(rmNode2.getNodeID()),
        am1.getApplicationAttemptId(), 19);
    // Submit app2 to queue-c and asks for a 4G container for AM
    RMApp app2 = rm1.submitApp(4 * GB, "app", "user", null, "c");
    FiCaSchedulerApp schedulerApp2 = cs.getApplicationAttempt(
        ApplicationAttemptId.newInstance(app2.getApplicationId(), 1));
    // Call editSchedule: containers are selected to be preemption candidate
    ProportionalCapacityPreemptionPolicy editPolicy =
        (ProportionalCapacityPreemptionPolicy) getSchedulingEditPolicy(rm1);
    editPolicy.editSchedule();
    Assert.assertEquals(3, editPolicy.getToPreemptContainers().size());
    // Call editSchedule again: selected containers are killed
    editPolicy.editSchedule();
    waitNumberOfLiveContainersFromApp(schedulerApp1, 36);
    // Call allocation, containers are reserved
    cs.handle(new NodeUpdateSchedulerEvent(rmNode1));
    cs.handle(new NodeUpdateSchedulerEvent(rmNode2));
    waitNumberOfReservedContainersFromApp(schedulerApp2, 1);
    // Call editSchedule twice and allocation once, container should get allocated
    editPolicy.editSchedule();
    editPolicy.editSchedule();
    int tick = 0;
    while (schedulerApp2.getLiveContainers().size() != 1 && tick < 10) {
      cs.handle(new NodeUpdateSchedulerEvent(rmNode1));
      cs.handle(new NodeUpdateSchedulerEvent(rmNode2));
      tick++;
      Thread.sleep(100);
    }
    waitNumberOfReservedContainersFromApp(schedulerApp2, 0);
    rm1.close();
  }
  @Test(timeout = 60000)
  public void testPriorityPreemptionWhenAllQueuesAreBelowGuaranteedCapacities()
      throws Exception {
    /**
     * Test case: Submit two applications (app1/app2) to different queues, queue
     * structure:
     *
     * <pre>
     *             Root
     *            /  |  \
     *           a   b   c
     *          10   20  70
     * </pre>
     *
     * 1) Two nodes (n1/n2) in the cluster, each of them has 20G.
     *
     * 2) app1 submit to queue-b first, it asked 6 * 1G containers
     * We will allocate 4 on n1 (including AM) and 3 on n2.
     *
     * 3) app2 submit to queue-c, ask for one 18G container (for AM)
     *
     * After preemption, we should expect:
     * Preempt 2 containers from app1 and AM of app2 successfully allocated.
     */
    conf.setPUOrderingPolicyUnderUtilizedPreemptionEnabled(true);
    conf.setPUOrderingPolicyUnderUtilizedPreemptionDelay(1000);
    conf.setQueueOrderingPolicy(CapacitySchedulerConfiguration.ROOT,
        CapacitySchedulerConfiguration.QUEUE_PRIORITY_UTILIZATION_ORDERING_POLICY);
    // Queue c has higher priority than a/b
    conf.setQueuePriority(CapacitySchedulerConfiguration.ROOT + ".c", 1);
    MockRM rm1 = new MockRM(conf);
    rm1.getRMContext().setNodeLabelManager(mgr);
    rm1.start();
    MockNM nm1 = rm1.registerNode("h1:1234", 20 * GB);
    MockNM nm2 = rm1.registerNode("h2:1234", 20 * GB);
    CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler();
    RMNode rmNode1 = rm1.getRMContext().getRMNodes().get(nm1.getNodeId());
    RMNode rmNode2 = rm1.getRMContext().getRMNodes().get(nm2.getNodeId());
    // launch an app to queue, AM container should be launched in nm1
    RMApp app1 = rm1.submitApp(1 * GB, "app", "user", null, "b");
    MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1);
    am1.allocate("*", 1 * GB, 6, new ArrayList<>());
    // Do allocation for node1/node2
    for (int i = 0; i < 3; i++) {
      cs.handle(new NodeUpdateSchedulerEvent(rmNode1));
      cs.handle(new NodeUpdateSchedulerEvent(rmNode2));
    }
    // App1 should have 7 containers now, so the abs-used-cap of b is
    // 7 / 40 = 17.5% < 20% (guaranteed)
    FiCaSchedulerApp schedulerApp1 = cs.getApplicationAttempt(
        am1.getApplicationAttemptId());
    Assert.assertEquals(7, schedulerApp1.getLiveContainers().size());
    // 4 from n1 and 3 from n2
    waitNumberOfLiveContainersOnNodeFromApp(cs.getNode(rmNode1.getNodeID()),
        am1.getApplicationAttemptId(), 4);
    waitNumberOfLiveContainersOnNodeFromApp(cs.getNode(rmNode2.getNodeID()),
        am1.getApplicationAttemptId(), 3);
    // Submit app2 to queue-c, asking for an 18G container for its AM
    RMApp app2 = rm1.submitApp(18 * GB, "app", "user", null, "c");
    FiCaSchedulerApp schedulerApp2 = cs.getApplicationAttempt(
        ApplicationAttemptId.newInstance(app2.getApplicationId(), 1));
    while (cs.getNode(rmNode1.getNodeID()).getReservedContainer() == null) {
      cs.handle(new NodeUpdateSchedulerEvent(rmNode1));
      Thread.sleep(10);
    }
    // Call editSchedule immediately: containers are not selected
    ProportionalCapacityPreemptionPolicy editPolicy =
        (ProportionalCapacityPreemptionPolicy) getSchedulingEditPolicy(rm1);
    editPolicy.editSchedule();
    Assert.assertEquals(0, editPolicy.getToPreemptContainers().size());
    // Sleep the timeout interval, we should be able to see containers selected
    Thread.sleep(1000);
    editPolicy.editSchedule();
    Assert.assertEquals(2, editPolicy.getToPreemptContainers().size());
    // Call editSchedule again: selected containers are killed, and new AM
    // container launched
    editPolicy.editSchedule();
    // Do allocation till reserved container allocated
    while (cs.getNode(rmNode1.getNodeID()).getReservedContainer() != null) {
      cs.handle(new NodeUpdateSchedulerEvent(rmNode1));
      Thread.sleep(10);
    }
    waitNumberOfLiveContainersFromApp(schedulerApp2, 1);
    rm1.close();
  }
  @Test(timeout = 300000)
  public void testPriorityPreemptionRequiresMoveReservation()
      throws Exception {
    /**
     * Test case: Submit two applications (app1/app2) to different queues, queue
     * structure:
     *
     * <pre>
     *             Root
     *            /  |  \
     *           a   b   c
     *          10   20  70
     * </pre>
     *
     * 1) 3 nodes in the cluster, 10G for each
     *
     * 2) app1 submit to queue-b first, it asked 2G each,
     *    it can get 2G on n1 (AM), 2 * 2G on n2
     *
     * 3) app2 submit to queue-c, with 2G AM container (allocated on n3)
     *    app2 requires 9G resource, which will be reserved on n3
     *
     * We should expect container unreserved from n3 and allocated on n1/n2
     */
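    /*
     * Reading aid (derived from the values used in this test): n3 can only
     * offer 10G - 2G (app2's AM) = 8G, so the 9G request is reserved there.
     * With move-reservation enabled, the policy relocates the reservation to
     * n2, where app1 holds 2 * 2G that can be preempted (n1 is skipped because
     * only app1's AM lives there). Killing those two containers frees the full
     * 10G on n2, which satisfies the 9G reservation.
     */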
    conf.setPUOrderingPolicyUnderUtilizedPreemptionEnabled(true);
    conf.setPUOrderingPolicyUnderUtilizedPreemptionDelay(1000);
    conf.setQueueOrderingPolicy(CapacitySchedulerConfiguration.ROOT,
        CapacitySchedulerConfiguration.QUEUE_PRIORITY_UTILIZATION_ORDERING_POLICY);
    conf.setPUOrderingPolicyUnderUtilizedPreemptionMoveReservation(true);
    // Queue c has higher priority than a/b
    conf.setQueuePriority(CapacitySchedulerConfiguration.ROOT + ".c", 1);
    MockRM rm1 = new MockRM(conf);
    rm1.getRMContext().setNodeLabelManager(mgr);
    rm1.start();
    MockNM nm1 = rm1.registerNode("h1:1234", 10 * GB);
    MockNM nm2 = rm1.registerNode("h2:1234", 10 * GB);
    MockNM nm3 = rm1.registerNode("h3:1234", 10 * GB);
    CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler();
    RMNode rmNode1 = rm1.getRMContext().getRMNodes().get(nm1.getNodeId());
    RMNode rmNode2 = rm1.getRMContext().getRMNodes().get(nm2.getNodeId());
    RMNode rmNode3 = rm1.getRMContext().getRMNodes().get(nm3.getNodeId());
    // launch an app to queue, AM container should be launched in nm1
    RMApp app1 = rm1.submitApp(2 * GB, "app", "user", null, "b");
    MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, nm1);
    am1.allocate("*", 2 * GB, 2, new ArrayList<>());
    // Do allocation for node2 twice
    for (int i = 0; i < 2; i++) {
      cs.handle(new NodeUpdateSchedulerEvent(rmNode2));
      cs.handle(new NodeUpdateSchedulerEvent(rmNode2));
    }
    FiCaSchedulerApp schedulerApp1 = cs.getApplicationAttempt(
        am1.getApplicationAttemptId());
    Assert.assertEquals(3, schedulerApp1.getLiveContainers().size());
    // 1 from n1 and 2 from n2
    waitNumberOfLiveContainersOnNodeFromApp(cs.getNode(rmNode1.getNodeID()),
        am1.getApplicationAttemptId(), 1);
    waitNumberOfLiveContainersOnNodeFromApp(cs.getNode(rmNode2.getNodeID()),
        am1.getApplicationAttemptId(), 2);
    // Submit app2 to queue-c and asks for a 2G container for AM, on n3
    RMApp app2 = rm1.submitApp(2 * GB, "app", "user", null, "c");
    MockAM am2 = MockRM.launchAndRegisterAM(app2, rm1, nm3);
    FiCaSchedulerApp schedulerApp2 = cs.getApplicationAttempt(
        ApplicationAttemptId.newInstance(app2.getApplicationId(), 1));
    // Asks 1 * 9G container
    am2.allocate("*", 9 * GB, 1, new ArrayList<>());
    // Do allocation for node3 once
    cs.handle(new NodeUpdateSchedulerEvent(rmNode3));
    // Make sure container reserved on node3
    Assert.assertNotNull(
        cs.getNode(rmNode3.getNodeID()).getReservedContainer());
    // Call editSchedule immediately: nothing happens
    ProportionalCapacityPreemptionPolicy editPolicy =
        (ProportionalCapacityPreemptionPolicy) getSchedulingEditPolicy(rm1);
    editPolicy.editSchedule();
    Assert.assertNotNull(
        cs.getNode(rmNode3.getNodeID()).getReservedContainer());
    // Sleep the timeout interval, we should be able to see reserved container
    // moved to n2 (n1 occupied by AM)
    Thread.sleep(1000);
    editPolicy.editSchedule();
    Assert.assertNull(
        cs.getNode(rmNode3.getNodeID()).getReservedContainer());
    Assert.assertNotNull(
        cs.getNode(rmNode2.getNodeID()).getReservedContainer());
    Assert.assertEquals(am2.getApplicationAttemptId(), cs.getNode(
        rmNode2.getNodeID()).getReservedContainer().getApplicationAttemptId());
    // Do it again, we should see containers marked to be preempted
    editPolicy.editSchedule();
    Assert.assertEquals(2, editPolicy.getToPreemptContainers().size());
    // Call editSchedule again: selected containers are killed
    editPolicy.editSchedule();
    // Do allocation till reserved container allocated
    while (schedulerApp2.getLiveContainers().size() < 2) {
      cs.handle(new NodeUpdateSchedulerEvent(rmNode2));
      Thread.sleep(200);
    }
    waitNumberOfLiveContainersFromApp(schedulerApp1, 1);
    rm1.close();
  }
  @Test(timeout = 60000)
  public void testPriorityPreemptionOnlyTriggeredWhenDemandingQueueUnsatisfied()
      throws Exception {
    /**
     * Test case: Submit two applications (app1/app2) to different queues, queue
     * structure:
     *
     * <pre>
     *             Root
     *            /  |  \
     *           a   b   c
     *          10   20  70
     * </pre>
     *
     * 1) 10 nodes (n0-n9) in the cluster, each of them has 10G.
     *
     * 2) app1 submit to queue-b first, it asked 8 * 1G containers
     * We will allocate 1 container on each of n0-n8 (AM on n0)
     *
     * 3) app2 submit to queue-c, ask for 10 * 10G containers (including AM)
     *
     * After preemption, we should expect:
     * Preempt 7 containers from app1 and usage of app2 is 70%
     */
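    /*
     * Reading aid (derived from the values used in this test): the cluster is
     * 10 * 10G = 100G, so queue-c's guarantee is 70G. app2 already holds one
     * 10G AM, so preempting 6 of app1's 1G containers frees 6 more full nodes:
     * (6 + 1) * 10G = 70G, exactly c's guarantee. Preemption stops there even
     * though app2 still has outstanding requests, which is the point of this
     * test.
     */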
    conf.setPUOrderingPolicyUnderUtilizedPreemptionEnabled(true);
    conf.setPUOrderingPolicyUnderUtilizedPreemptionDelay(1000);
    conf.setQueueOrderingPolicy(CapacitySchedulerConfiguration.ROOT,
        CapacitySchedulerConfiguration.QUEUE_PRIORITY_UTILIZATION_ORDERING_POLICY);
    // Queue c has higher priority than a/b
    conf.setQueuePriority(CapacitySchedulerConfiguration.ROOT + ".c", 1);
    MockRM rm1 = new MockRM(conf);
    rm1.getRMContext().setNodeLabelManager(mgr);
    rm1.start();
    MockNM[] mockNMs = new MockNM[10];
    for (int i = 0; i < 10; i++) {
      mockNMs[i] = rm1.registerNode("h" + i + ":1234", 10 * GB);
    }
    CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler();
    RMNode[] rmNodes = new RMNode[10];
    for (int i = 0; i < 10; i++) {
      rmNodes[i] = rm1.getRMContext().getRMNodes().get(mockNMs[i].getNodeId());
    }
    // launch an app to queue-b, AM container should be launched on nm0
    RMApp app1 = rm1.submitApp(1 * GB, "app", "user", null, "b");
    MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, mockNMs[0]);
    am1.allocate("*", 1 * GB, 8, new ArrayList<>());
    // Do allocation for nm1-nm8
    for (int i = 1; i < 9; i++) {
      cs.handle(new NodeUpdateSchedulerEvent(rmNodes[i]));
    }
    // App1 should have 9 containers now, so the abs-used-cap of b is 9%
    FiCaSchedulerApp schedulerApp1 = cs.getApplicationAttempt(
        am1.getApplicationAttemptId());
    Assert.assertEquals(9, schedulerApp1.getLiveContainers().size());
    for (int i = 0; i < 9; i++) {
      waitNumberOfLiveContainersOnNodeFromApp(cs.getNode(rmNodes[i].getNodeID()),
          am1.getApplicationAttemptId(), 1);
    }
    // Submit app2 to queue-c and asks for a 10G container for AM
    // Launch AM in NM9
    RMApp app2 = rm1.submitApp(10 * GB, "app", "user", null, "c");
    MockAM am2 = MockRM.launchAndRegisterAM(app2, rm1, mockNMs[9]);
    FiCaSchedulerApp schedulerApp2 = cs.getApplicationAttempt(
        ApplicationAttemptId.newInstance(app2.getApplicationId(), 1));
    // Ask 10 * 10GB containers
    am2.allocate("*", 10 * GB, 10, new ArrayList<>());
    // Do allocation for all nms
    for (int i = 1; i < 10; i++) {
      cs.handle(new NodeUpdateSchedulerEvent(rmNodes[i]));
    }
    // Check am2 reserved resource on nm1-nm8
    for (int i = 1; i < 9; i++) {
      Assert.assertNotNull("Should reserve on nm-" + i,
          cs.getNode(rmNodes[i].getNodeID()).getReservedContainer());
    }
    // Sleep the timeout interval, we should be able to see 6 containers
    // selected: 6 (selected) + 1 (already allocated AM) brings queue-c to its
    // 70% target capacity
    Thread.sleep(1000);
    ProportionalCapacityPreemptionPolicy editPolicy =
        (ProportionalCapacityPreemptionPolicy) getSchedulingEditPolicy(rm1);
    editPolicy.editSchedule();
    checkNumberOfPreemptionCandidateFromApp(editPolicy, 6,
        am1.getApplicationAttemptId());
    // Call editSchedule again: selected containers are killed
    editPolicy.editSchedule();
    waitNumberOfLiveContainersFromApp(schedulerApp1, 3);
    // Do allocation for all nms
    for (int i = 1; i < 10; i++) {
      cs.handle(new NodeUpdateSchedulerEvent(rmNodes[i]));
    }
    waitNumberOfLiveContainersFromApp(schedulerApp2, 7);
    waitNumberOfLiveContainersFromApp(schedulerApp1, 3);
    rm1.close();
  }
  @Test(timeout = 600000)
  public void testPriorityPreemptionFromHighestPriorityQueueAndOldestContainer()
      throws Exception {
    /**
     * Test case: Submit three applications (app1/app2/app3) to different queues, queue
     * structure:
     *
     * <pre>
     *             Root
     *            /  |  \
     *           a   b   c
     *          45  45  10
     * </pre>
     *
     * Priority of queue_a = 1
     * Priority of queue_b = 2
     *
     * 1) 5 nodes (n0-n4) in the cluster, each of them has 4G.
     *
     * 2) app1 submit to queue-c first (AM=1G), it asked 4 * 1G containers
     *    We will allocate 1 container on each of n0-n4. AM on n4.
     *
     * 3) app2 submit to queue-a, AM container=0.5G, allocated on n0
     *    Ask for 2 * 3.5G containers. (Reserved on n0/n1)
     *
     * 4) app3 submit to queue-b, AM container=0.5G, allocated on n2
     *    Ask for 2 * 3.5G containers. (Reserved on n2/n3)
     *
     * We expect the first container to be preempted on n2, since the
     * reservation there belongs to the highest-priority demanding queue (b)
     * and is its oldest reservation.
     */
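    /*
     * Reading aid (derived from the values used in this test): the per-round
     * preemption limit configured below works out to a single 1G container of
     * app1 per editSchedule round. Queue-b (priority 2) outranks queue-a
     * (priority 1), so b's reservations are served first, oldest first (n2,
     * then n3), and only then a's reservations (n0, then n1). That ordering is
     * what the four rounds below assert.
     */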
    // Total preemption = 1G per round, which is 5% of cluster resource (20G)
    conf.setFloat(CapacitySchedulerConfiguration.TOTAL_PREEMPTION_PER_ROUND,
        0.05f);
    conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 512);
    conf.setPUOrderingPolicyUnderUtilizedPreemptionEnabled(true);
    conf.setPUOrderingPolicyUnderUtilizedPreemptionDelay(1000);
    conf.setQueueOrderingPolicy(CapacitySchedulerConfiguration.ROOT,
        CapacitySchedulerConfiguration.QUEUE_PRIORITY_UTILIZATION_ORDERING_POLICY);
    // Queues a/b have higher priority than c
    conf.setQueuePriority(CapacitySchedulerConfiguration.ROOT + ".a", 1);
    conf.setQueuePriority(CapacitySchedulerConfiguration.ROOT + ".b", 2);
    conf.setCapacity(CapacitySchedulerConfiguration.ROOT + ".a", 45f);
    conf.setCapacity(CapacitySchedulerConfiguration.ROOT + ".b", 45f);
    conf.setCapacity(CapacitySchedulerConfiguration.ROOT + ".c", 10f);
    MockRM rm1 = new MockRM(conf);
    rm1.getRMContext().setNodeLabelManager(mgr);
    rm1.start();
    MockNM[] mockNMs = new MockNM[5];
    for (int i = 0; i < 5; i++) {
      mockNMs[i] = rm1.registerNode("h" + i + ":1234", 4 * GB);
    }
    CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler();
    RMNode[] rmNodes = new RMNode[5];
    for (int i = 0; i < 5; i++) {
      rmNodes[i] = rm1.getRMContext().getRMNodes().get(mockNMs[i].getNodeId());
    }
    // launch an app to queue-c, AM container should be launched on nm4
    RMApp app1 = rm1.submitApp(1 * GB, "app", "user", null, "c");
    MockAM am1 = MockRM.launchAndRegisterAM(app1, rm1, mockNMs[4]);
    am1.allocate("*", 1 * GB, 4, new ArrayList<>());
    // Do allocation for n0-n3
    for (int i = 0; i < 4; i++) {
      cs.handle(new NodeUpdateSchedulerEvent(rmNodes[i]));
    }
    // App1 should have 5 containers now, one for each node
    FiCaSchedulerApp schedulerApp1 = cs.getApplicationAttempt(
        am1.getApplicationAttemptId());
    Assert.assertEquals(5, schedulerApp1.getLiveContainers().size());
    for (int i = 0; i < 5; i++) {
      waitNumberOfLiveContainersOnNodeFromApp(cs.getNode(rmNodes[i].getNodeID()),
          am1.getApplicationAttemptId(), 1);
    }
    // Submit app2 to queue-a and asks for a 0.5G container for AM (on n0)
    RMApp app2 = rm1.submitApp(512, "app", "user", null, "a");
    MockAM am2 = MockRM.launchAndRegisterAM(app2, rm1, mockNMs[0]);
    FiCaSchedulerApp schedulerApp2 = cs.getApplicationAttempt(
        ApplicationAttemptId.newInstance(app2.getApplicationId(), 1));
    // Ask 2 * 3.5GB containers
    am2.allocate("*", 3 * GB + 512, 2, new ArrayList<>());
    // Do allocation for n0-n1
    for (int i = 0; i < 2; i++) {
      cs.handle(new NodeUpdateSchedulerEvent(rmNodes[i]));
    }
    // Check am2 reserved resource from nm0-nm1
    for (int i = 0; i < 2; i++) {
      Assert.assertNotNull("Should reserve on nm-" + i,
          cs.getNode(rmNodes[i].getNodeID()).getReservedContainer());
      Assert.assertEquals(cs.getNode(rmNodes[i].getNodeID())
          .getReservedContainer().getQueueName(), "a");
    }
    // Submit app3 to queue-b and asks for a 0.5G container for AM (on n2)
    RMApp app3 = rm1.submitApp(512, "app", "user", null, "b");
    MockAM am3 = MockRM.launchAndRegisterAM(app3, rm1, mockNMs[2]);
    FiCaSchedulerApp schedulerApp3 = cs.getApplicationAttempt(
        ApplicationAttemptId.newInstance(app3.getApplicationId(), 1));
    // Ask 2 * 3.5GB containers
    am3.allocate("*", 3 * GB + 512, 2, new ArrayList<>());
    // Do allocation for n2-n3
    for (int i = 2; i < 4; i++) {
      cs.handle(new NodeUpdateSchedulerEvent(rmNodes[i]));
    }
    // Check am3 reserved resource on nm2-nm3
    for (int i = 2; i < 4; i++) {
      Assert.assertNotNull("Should reserve on nm-" + i,
          cs.getNode(rmNodes[i].getNodeID()).getReservedContainer());
      Assert.assertEquals(cs.getNode(rmNodes[i].getNodeID())
          .getReservedContainer().getQueueName(), "b");
    }
    // Sleep the timeout interval, we should be able to see 1 container selected
    Thread.sleep(1000);
    /* 1st container preempted is on n2 */
    ProportionalCapacityPreemptionPolicy editPolicy =
        (ProportionalCapacityPreemptionPolicy) getSchedulingEditPolicy(rm1);
    editPolicy.editSchedule();
    // We should have one to-preempt container, on node[2]
    Set<RMContainer> selectedToPreempt =
        editPolicy.getToPreemptContainers().keySet();
    Assert.assertEquals(1, selectedToPreempt.size());
    Assert.assertEquals(mockNMs[2].getNodeId(),
        selectedToPreempt.iterator().next().getAllocatedNode());
    // Call editSchedule again: selected containers are killed
    editPolicy.editSchedule();
    waitNumberOfLiveContainersFromApp(schedulerApp1, 4);
    // Make sure the container killed, then do allocation for all nms
    for (int i = 0; i < 4; i++) {
      cs.handle(new NodeUpdateSchedulerEvent(rmNodes[i]));
    }
    waitNumberOfLiveContainersFromApp(schedulerApp1, 4);
    waitNumberOfLiveContainersFromApp(schedulerApp2, 1);
    waitNumberOfLiveContainersFromApp(schedulerApp3, 2);
    /* 2nd container preempted is on n3 */
    editPolicy.editSchedule();
    // We should have one to-preempt container, on node[3]
    selectedToPreempt =
        editPolicy.getToPreemptContainers().keySet();
    Assert.assertEquals(1, selectedToPreempt.size());
    Assert.assertEquals(mockNMs[3].getNodeId(),
        selectedToPreempt.iterator().next().getAllocatedNode());
    // Call editSchedule again: selected containers are killed
    editPolicy.editSchedule();
    waitNumberOfLiveContainersFromApp(schedulerApp1, 3);
    // Do allocation for all nms
    for (int i = 0; i < 4; i++) {
      cs.handle(new NodeUpdateSchedulerEvent(rmNodes[i]));
    }
    waitNumberOfLiveContainersFromApp(schedulerApp1, 3);
    waitNumberOfLiveContainersFromApp(schedulerApp2, 1);
    waitNumberOfLiveContainersFromApp(schedulerApp3, 3);
    /* 3rd container preempted is on n0 */
    editPolicy.editSchedule();
    // We should have one to-preempt container, on node[0]
    selectedToPreempt =
        editPolicy.getToPreemptContainers().keySet();
    Assert.assertEquals(1, selectedToPreempt.size());
    Assert.assertEquals(mockNMs[0].getNodeId(),
        selectedToPreempt.iterator().next().getAllocatedNode());
    // Call editSchedule again: selected containers are killed
    editPolicy.editSchedule();
    waitNumberOfLiveContainersFromApp(schedulerApp1, 2);
    // Do allocation for all nms
    for (int i = 0; i < 4; i++) {
      cs.handle(new NodeUpdateSchedulerEvent(rmNodes[i]));
    }
    waitNumberOfLiveContainersFromApp(schedulerApp1, 2);
    waitNumberOfLiveContainersFromApp(schedulerApp2, 2);
    waitNumberOfLiveContainersFromApp(schedulerApp3, 3);
    /* 4th container preempted is on n1 */
    editPolicy.editSchedule();
    // We should have one to-preempt container, on node[1]
    selectedToPreempt =
        editPolicy.getToPreemptContainers().keySet();
    Assert.assertEquals(1, selectedToPreempt.size());
    Assert.assertEquals(mockNMs[1].getNodeId(),
        selectedToPreempt.iterator().next().getAllocatedNode());
    // Call editSchedule again: selected containers are killed
    editPolicy.editSchedule();
    waitNumberOfLiveContainersFromApp(schedulerApp1, 1);
    // Do allocation for all nms
    for (int i = 0; i < 4; i++) {
      cs.handle(new NodeUpdateSchedulerEvent(rmNodes[i]));
    }
    waitNumberOfLiveContainersFromApp(schedulerApp1, 1);
    waitNumberOfLiveContainersFromApp(schedulerApp2, 3);
    waitNumberOfLiveContainersFromApp(schedulerApp3, 3);
    rm1.close();
  }
}
 
 | 
	apache_hadoop 
 | 
	2017-01-28 
 | 
	312b36d113d83640b92c62fdd91ede74bd04c00f 
 | 
					
	hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestRaceWhenRelogin.java 
 | 162 
							 | 
	/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.security;
import static org.junit.Assert.assertTrue;
import java.io.File;
import java.io.IOException;
import java.security.PrivilegedExceptionAction;
import java.util.Arrays;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.atomic.AtomicBoolean;
import javax.security.auth.kerberos.KerberosTicket;
import javax.security.sasl.Sasl;
import javax.security.sasl.SaslClient;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.minikdc.KerberosSecurityTestcase;
import org.apache.hadoop.security.SaslRpcServer.AuthMethod;
import org.apache.hadoop.security.SaslRpcServer.QualityOfProtection;
import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
import org.junit.Before;
import org.junit.Test;
/**
 * Testcase for HADOOP-13433 that confirms that tgt will always be the first
 * ticket after relogin.
 */
public class TestRaceWhenRelogin extends KerberosSecurityTestcase {
  private int numThreads = 10;
  private String clientPrincipal = "client";
  private String serverProtocol = "server";
  private String[] serverProtocols;
  private String host = "localhost";
  private String serverPrincipal = serverProtocol + "/" + host;
  private String[] serverPrincipals;
  private File keytabFile;
  private Configuration conf = new Configuration();
  private Map<String, String> props;
  private UserGroupInformation ugi;
  @Before
  public void setUp() throws Exception {
    keytabFile = new File(getWorkDir(), "keytab");
    serverProtocols = new String[numThreads];
    serverPrincipals = new String[numThreads];
    for (int i = 0; i < numThreads; i++) {
      serverProtocols[i] = serverProtocol + i;
      serverPrincipals[i] = serverProtocols[i] + "/" + host;
    }
    String[] principals =
        Arrays.copyOf(serverPrincipals, serverPrincipals.length + 2);
    principals[numThreads] = serverPrincipal;
    principals[numThreads + 1] = clientPrincipal;
    getKdc().createPrincipal(keytabFile, principals);
    SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS, conf);
    UserGroupInformation.setConfiguration(conf);
    UserGroupInformation.setShouldRenewImmediatelyForTests(true);
    props = new HashMap<String, String>();
    props.put(Sasl.QOP, QualityOfProtection.AUTHENTICATION.saslQop);
    ugi = UserGroupInformation.loginUserFromKeytabAndReturnUGI(clientPrincipal,
        keytabFile.getAbsolutePath());
  }
  private void relogin(AtomicBoolean pass) {
    for (int i = 0; i < 100; i++) {
      try {
        ugi.reloginFromKeytab();
      } catch (IOException e) {
      }
      KerberosTicket tgt = ugi.getSubject().getPrivateCredentials().stream()
          .filter(c -> c instanceof KerberosTicket).map(c -> (KerberosTicket) c)
          .findFirst().get();
      if (!tgt.getServer().getName().startsWith("krbtgt")) {
        pass.set(false);
        return;
      }
      try {
        Thread.sleep(50);
      } catch (InterruptedException e) {
      }
    }
  }
  private void getServiceTicket(AtomicBoolean running, String serverProtocol) {
    while (running.get()) {
      try {
        ugi.doAs(new PrivilegedExceptionAction<Void>() {
          @Override
          public Void run() throws Exception {
            SaslClient client = Sasl.createSaslClient(
                new String[] {AuthMethod.KERBEROS.getMechanismName()},
                clientPrincipal, serverProtocol, host, props, null);
            client.evaluateChallenge(new byte[0]);
            client.dispose();
            return null;
          }
        });
      } catch (Exception e) {
      }
      try {
        Thread.sleep(ThreadLocalRandom.current().nextInt(100));
      } catch (InterruptedException e) {
      }
    }
  }
  @Test
  public void test() throws InterruptedException, IOException {
    AtomicBoolean pass = new AtomicBoolean(true);
    Thread reloginThread = new Thread(() -> relogin(pass), "Relogin");
    AtomicBoolean running = new AtomicBoolean(true);
    Thread[] getServiceTicketThreads = new Thread[numThreads];
    for (int i = 0; i < numThreads; i++) {
      String serverProtocol = serverProtocols[i];
      getServiceTicketThreads[i] =
          new Thread(() -> getServiceTicket(running, serverProtocol),
              "GetServiceTicket-" + i);
    }
    for (Thread getServiceTicketThread : getServiceTicketThreads) {
      getServiceTicketThread.start();
    }
    reloginThread.start();
    reloginThread.join();
    running.set(false);
    for (Thread getServiceTicketThread : getServiceTicketThreads) {
      getServiceTicketThread.join();
    }
    assertTrue("tgt is not the first ticket after relogin", pass.get());
  }
}
 
 | 
	apache_hadoop 
 | 
	2017-01-28 
 | 
	312b36d113d83640b92c62fdd91ede74bd04c00f 
 | 
					
	hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestReadWriteDiskValidator.java 
 | 161 
							 | 
	/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.util;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import org.apache.hadoop.metrics2.MetricsSource;
import org.apache.hadoop.metrics2.MetricsSystem;
import org.apache.hadoop.metrics2.impl.MetricsCollectorImpl;
import org.apache.hadoop.metrics2.impl.MetricsRecords;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.util.DiskChecker.DiskErrorException;
import org.junit.Before;
import org.junit.Test;
import org.junit.Assert;
import java.io.File;
import java.nio.file.Files;
import java.nio.file.Paths;
/**
 * The class to test {@link ReadWriteDiskValidator} and
 * {@link ReadWriteDiskValidatorMetrics}.
 */
public class TestReadWriteDiskValidator {
  private MetricsSystem ms;
  @Before
  public void setUp() {
    ms = DefaultMetricsSystem.instance();
  }
  @Test
  public void testReadWriteDiskValidator()
      throws DiskErrorException, InterruptedException {
    int count = 100;
    File testDir = new File(System.getProperty("test.build.data"));
    ReadWriteDiskValidator readWriteDiskValidator =
        (ReadWriteDiskValidator) DiskValidatorFactory.getInstance(
            ReadWriteDiskValidator.NAME);
    for (int i = 0; i < count; i++) {
      readWriteDiskValidator.checkStatus(testDir);
    }
    ReadWriteDiskValidatorMetrics metric =
        ReadWriteDiskValidatorMetrics.getMetric(testDir.toString());
    Assert.assertEquals("The count number of estimator in MutableQuantiles"
        + "metrics of file read is not right",
        metric.getFileReadQuantiles()[0].getEstimator().getCount(), count);
    Assert.assertEquals("The count number of estimator in MutableQuantiles"
        + "metrics of file write is not right",
        metric.getFileWriteQuantiles()[0].getEstimator().getCount(),
        count);
    MetricsSource source = ms.getSource(
        ReadWriteDiskValidatorMetrics.sourceName(testDir.toString()));
    MetricsCollectorImpl collector = new MetricsCollectorImpl();
    source.getMetrics(collector, true);
    MetricsRecords.assertMetric(collector.getRecords().get(0),
        "FailureCount", 0);
    MetricsRecords.assertMetric(collector.getRecords().get(0),
        "LastFailureTime", (long)0);
    // None of the MutableQuantiles have rolled over yet because the minimum
    // interval is 1 hour, so we just check that these metrics exist.
    MetricsRecords.assertMetricNotNull(collector.getRecords().get(0),
        "WriteLatency3600sNumOps");
    MetricsRecords.assertMetricNotNull(collector.getRecords().get(0),
        "WriteLatency3600s50thPercentileLatencyMicros");
    MetricsRecords.assertMetricNotNull(collector.getRecords().get(0),
        "WriteLatency86400sNumOps");
    MetricsRecords.assertMetricNotNull(collector.getRecords().get(0),
        "WriteLatency864000sNumOps");
    MetricsRecords.assertMetricNotNull(collector.getRecords().get(0),
        "ReadLatency3600sNumOps");
    MetricsRecords.assertMetricNotNull(collector.getRecords().get(0),
        "ReadLatency3600s50thPercentileLatencyMicros");
    MetricsRecords.assertMetricNotNull(collector.getRecords().get(0),
        "ReadLatency86400sNumOps");
    MetricsRecords.assertMetricNotNull(collector.getRecords().get(0),
        "ReadLatency864000sNumOps");
  }
  @Test
  public void testCheckFailures() throws Throwable {
    ReadWriteDiskValidator readWriteDiskValidator =
        (ReadWriteDiskValidator) DiskValidatorFactory.getInstance(
            ReadWriteDiskValidator.NAME);
    // create a temporary test directory under the system test directory
    File testDir = Files.createTempDirectory(
        Paths.get(System.getProperty("test.build.data")), "test").toFile();
    try {
      Shell.execCommand(Shell.getSetPermissionCommand("000", false,
          testDir.getAbsolutePath()));
    } catch (Exception e){
      testDir.delete();
      throw e;
    }
    try {
      readWriteDiskValidator.checkStatus(testDir);
      fail("Disk check should fail.");
    } catch (DiskErrorException e) {
      assertTrue(e.getMessage().equals("Disk Check failed!"));
    }
    MetricsSource source = ms.getSource(
        ReadWriteDiskValidatorMetrics.sourceName(testDir.toString()));
    MetricsCollectorImpl collector = new MetricsCollectorImpl();
    source.getMetrics(collector, true);
    try {
      readWriteDiskValidator.checkStatus(testDir);
      fail("Disk check should fail.");
    } catch (DiskErrorException e) {
      assertTrue(e.getMessage().equals("Disk Check failed!"));
    }
    source.getMetrics(collector, true);
    // verify the first metrics record
    MetricsRecords.assertMetric(collector.getRecords().get(0),
        "FailureCount", 1);
    Long lastFailureTime1 = (Long) MetricsRecords.getMetricValueByName(
        collector.getRecords().get(0), "LastFailureTime");
    // verify the second metrics record
    MetricsRecords.assertMetric(collector.getRecords().get(1),
        "FailureCount", 2);
    Long lastFailureTime2 = (Long) MetricsRecords.getMetricValueByName(
        collector.getRecords().get(1), "LastFailureTime");
    assertTrue("The first failure time should be less than the second one",
        lastFailureTime1 < lastFailureTime2);
    testDir.delete();
  }
}
 
 | 
	apache_hadoop 
 | 
	2017-01-28 
 | 
	312b36d113d83640b92c62fdd91ede74bd04c00f 
 | 
					
	hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AppPriorityACLConfigurationParser.java 
 | 219 
							 | 
	/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements.  See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership.  The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License.  You may obtain a copy of the License at
*
*     http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
import java.util.ArrayList;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.security.authorize.AccessControlList;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.api.records.Priority;
/**
 *
 * AppPriorityACLConfigurationParser is used to parse the application priority
 * ACL configuration from capacity-scheduler.xml.
 */
public class AppPriorityACLConfigurationParser {
  private static final Log LOG = LogFactory
      .getLog(AppPriorityACLConfigurationParser.class);
  public enum AppPriorityACLKeyType {
    USER(1), GROUP(2), MAX_PRIORITY(3), DEFAULT_PRIORITY(4);
    private final int id;
    AppPriorityACLKeyType(int id) {
      this.id = id;
    }
    public int getId() {
      return this.id;
    }
  }
  public static final String PATTERN_FOR_PRIORITY_ACL = "\\[([^\\]]+)";
  @Private
  public static final String ALL_ACL = "*";
  @Private
  public static final String NONE_ACL = " ";
  public List<AppPriorityACLGroup> getPriorityAcl(Priority clusterMaxPriority,
      String aclString) {
    List<AppPriorityACLGroup> aclList = new ArrayList<AppPriorityACLGroup>();
    Matcher matcher = Pattern.compile(PATTERN_FOR_PRIORITY_ACL)
        .matcher(aclString);
    /*
     * Each ACL group will be separated by "[]". Syntax of each ACL group could
     * be like below "user=b1,b2 group=g1 max-priority=a2 default-priority=a1"
     * Ideally this means "for this given user/group, maximum possible priority
     * is a2 and if the user has not specified any priority, then it is a1."
     */
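    /*
     * For illustration (hypothetical names, following the syntax sketched in
     * the comment above): an entry like
     *   [user=alice,bob group=dev max-priority=4 default-priority=2]
     * would be parsed into one AppPriorityACLGroup whose ACL list is
     * "alice,bob dev", whose max priority is 4 and whose default priority is
     * 2, subject to the clusterMaxPriority cap applied below.
     */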
    while (matcher.find()) {
      // Get the first ACL sub-group.
      String aclSubGroup = matcher.group(1);
      if (aclSubGroup.trim().isEmpty()) {
        continue;
      }
      /*
       * Internal storage is PriorityACLGroup which stores each parsed priority
       * ACLs group. This will help while looking for a user to priority mapping
       * during app submission time. ACLs will be passed in below order only. 1.
       * user/group 2. max-priority 3. default-priority
       */
      AppPriorityACLGroup userPriorityACL = new AppPriorityACLGroup();
      // userAndGroupName holds the user ACL and the group ACL as interim
      // storage, since user and group ACLs arrive as separate key-value pairs.
      List<StringBuilder> userAndGroupName = new ArrayList<>();
      for (String kvPair : aclSubGroup.trim().split(" +")) {
        /*
         * There are 3 possible options for key here: 1. user/group 2.
         * max-priority 3. default-priority
         */
        String[] splits = kvPair.split("=");
        // Ensure that each ACL sub string is key value pair separated by '='.
        if (splits != null && splits.length > 1) {
          parsePriorityACLType(userPriorityACL, splits, userAndGroupName);
        }
      }
      // If max-priority is higher than clusterMaxPriority, it is better to
      // cap it here.
      if (userPriorityACL.getMaxPriority().getPriority() > clusterMaxPriority
          .getPriority()) {
        LOG.warn("ACL configuration for '" + userPriorityACL.getMaxPriority()
            + "' is greater that cluster max priority. Resetting ACLs to "
            + clusterMaxPriority);
        userPriorityACL.setMaxPriority(
            Priority.newInstance(clusterMaxPriority.getPriority()));
      }
      AccessControlList acl = createACLStringForPriority(userAndGroupName);
      userPriorityACL.setACLList(acl);
      aclList.add(userPriorityACL);
    }
    return aclList;
  }
  /*
   * Parse the different ACL sub-parts of one priority group and store them
   * for later processing.
   */
  private void parsePriorityACLType(AppPriorityACLGroup userPriorityACL,
      String[] splits, List<StringBuilder> userAndGroupName) {
    // Here splits will have the key value pair at index 0 and 1 respectively.
    // To parse all keys, its better to convert to PriorityACLConfig enum.
    AppPriorityACLKeyType aclType = AppPriorityACLKeyType
        .valueOf(StringUtils.toUpperCase(splits[0].trim()));
    switch (aclType) {
    case MAX_PRIORITY :
      userPriorityACL
          .setMaxPriority(Priority.newInstance(Integer.parseInt(splits[1])));
      break;
    case USER :
      userAndGroupName.add(getUserOrGroupACLStringFromConfig(splits[1]));
      break;
    case GROUP :
      userAndGroupName.add(getUserOrGroupACLStringFromConfig(splits[1]));
      break;
    case DEFAULT_PRIORITY :
      int defaultPriority = Integer.parseInt(splits[1]);
      Priority priority = (defaultPriority < 0)
          ? Priority.newInstance(0)
          : Priority.newInstance(defaultPriority);
      userPriorityACL.setDefaultPriority(priority);
      break;
    default:
      break;
    }
  }
  /*
   * This method appends the different ACL keys for one priority group.
   * For example, USER will be combined with GROUP as "user2,user4 group1".
   */
  private AccessControlList createACLStringForPriority(
      List<StringBuilder> acls) {
    String finalACL = "";
    String userACL = acls.get(0).toString();
    // If any of user/group is *, consider it as acceptable for all.
    // "user" is at index 0, and "group" is at index 1.
    if (userACL.trim().equals(ALL_ACL)) {
      finalACL = ALL_ACL;
    } else if (userACL.equals(NONE_ACL)) {
      finalACL = NONE_ACL;
    } else {
      // Get USER segment
      if (!userACL.trim().isEmpty()) {
        // skip last appended ","
        finalACL = acls.get(0).toString();
      }
      // Get GROUP segment if any
      if (acls.size() > 1) {
        String groupACL = acls.get(1).toString();
        if (!groupACL.trim().isEmpty()) {
          finalACL = finalACL + " "
              + acls.get(1).toString();
        }
      }
    }
    // Here ACL will look like "user1,user2 group" in ideal cases.
    return new AccessControlList(finalACL.trim());
  }
  /*
   * This method builds the user/group ACL string for a given priority.
   * For example "user1,user2 group1,group2".
   */
  private StringBuilder getUserOrGroupACLStringFromConfig(String value) {
    // ACL strings can be generated for USER or GROUP.
    // aclList in map contains two entries. 1. USER, 2. GROUP.
    StringBuilder aclTypeName = new StringBuilder();
    if (value.trim().equals(ALL_ACL)) {
      aclTypeName.setLength(0);
      aclTypeName.append(ALL_ACL);
      return aclTypeName;
    }
    aclTypeName.append(value.trim());
    return aclTypeName;
  }
}
 
 | 
	apache_hadoop 
 | 
	2017-01-28 
 | 
	312b36d113d83640b92c62fdd91ede74bd04c00f 
 | 
					
	hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestOpportunisticContainerAllocatorAMService.java 
 | 453 
							 | 
	/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.yarn.server.resourcemanager;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.yarn.api.ApplicationMasterProtocolPB;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.AllocateRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.AllocateResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.FinishApplicationMasterRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.FinishApplicationMasterResponsePBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.RegisterApplicationMasterRequestPBImpl;
import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.RegisterApplicationMasterResponsePBImpl;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerState;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.ContainerUpdateType;
import org.apache.hadoop.yarn.api.records.ExecutionTypeRequest;
import org.apache.hadoop.yarn.api.records.ExecutionType;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceRequest;
import org.apache.hadoop.yarn.api.records.UpdateContainerRequest;
import org.apache.hadoop.yarn.server.api.DistributedSchedulingAMProtocolPB;
import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.DistributedSchedulingAllocateRequest;
import org.apache.hadoop.yarn.server.api.protocolrecords.DistributedSchedulingAllocateResponse;
import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterDistributedSchedulingAMResponse;
import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest;
import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterResponse;
import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest;
import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.ipc.HadoopYarnProtoRPC;
import org.apache.hadoop.yarn.ipc.YarnRPC;
import org.apache.hadoop.yarn.server.api.protocolrecords.RemoteNode;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.DistributedSchedulingAllocateRequestPBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.DistributedSchedulingAllocateResponsePBImpl;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RegisterDistributedSchedulingAMResponsePBImpl;
import org.apache.hadoop.yarn.server.api.records.OpportunisticContainersStatus;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.AMLivelinessMonitor;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerState;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeImpl;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeRemovedSchedulerEvent;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent;
import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager;
import org.apache.hadoop.yarn.server.scheduler.OpportunisticContainerContext;
import org.apache.hadoop.yarn.util.resource.Resources;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.mockito.Mockito;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
/**
 * Test cases for {@link OpportunisticContainerAllocatorAMService}.
 */
public class TestOpportunisticContainerAllocatorAMService {
  private static final int GB = 1024;
  private MockRM rm;
  @Before
  public void createAndStartRM() {
    CapacitySchedulerConfiguration csConf =
        new CapacitySchedulerConfiguration();
    YarnConfiguration conf = new YarnConfiguration(csConf);
    conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
        ResourceScheduler.class);
    conf.setBoolean(
        YarnConfiguration.OPPORTUNISTIC_CONTAINER_ALLOCATION_ENABLED, true);
    conf.setInt(
        YarnConfiguration.NM_CONTAINER_QUEUING_SORTING_NODES_INTERVAL_MS, 100);
    rm = new MockRM(conf);
    rm.start();
  }
  @After
  public void stopRM() {
    if (rm != null) {
      rm.stop();
    }
  }
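  /*
   * Reading aid (summarizing what the promote/demote tests below exercise):
   * containers are allocated as OPPORTUNISTIC and then updated through
   * UpdateContainerRequest. A duplicate promotion request while one is still
   * outstanding is rejected with UPDATE_OUTSTANDING_ERROR, a request carrying
   * a stale container version is rejected with
   * INCORRECT_CONTAINER_VERSION_ERROR, and each successful promote/demote
   * bumps the container version by one.
   */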
  @Test(timeout = 600000)
  public void testContainerPromoteAndDemoteBeforeContainerStart() throws Exception {
    HashMap<NodeId, MockNM> nodes = new HashMap<>();
    MockNM nm1 = new MockNM("h1:1234", 4096, rm.getResourceTrackerService());
    nodes.put(nm1.getNodeId(), nm1);
    MockNM nm2 = new MockNM("h1:4321", 4096, rm.getResourceTrackerService());
    nodes.put(nm2.getNodeId(), nm2);
    MockNM nm3 = new MockNM("h2:1234", 4096, rm.getResourceTrackerService());
    nodes.put(nm3.getNodeId(), nm3);
    MockNM nm4 = new MockNM("h2:4321", 4096, rm.getResourceTrackerService());
    nodes.put(nm4.getNodeId(), nm4);
    nm1.registerNode();
    nm2.registerNode();
    nm3.registerNode();
    nm4.registerNode();
    OpportunisticContainerAllocatorAMService amservice =
        (OpportunisticContainerAllocatorAMService) rm
            .getApplicationMasterService();
    RMApp app1 = rm.submitApp(1 * GB, "app", "user", null, "default");
    ApplicationAttemptId attemptId =
        app1.getCurrentAppAttempt().getAppAttemptId();
    MockAM am1 = MockRM.launchAndRegisterAM(app1, rm, nm2);
    ResourceScheduler scheduler = rm.getResourceScheduler();
    RMNode rmNode1 = rm.getRMContext().getRMNodes().get(nm1.getNodeId());
    RMNode rmNode2 = rm.getRMContext().getRMNodes().get(nm2.getNodeId());
    RMNode rmNode3 = rm.getRMContext().getRMNodes().get(nm3.getNodeId());
    RMNode rmNode4 = rm.getRMContext().getRMNodes().get(nm4.getNodeId());
    nm1.nodeHeartbeat(true);
    nm2.nodeHeartbeat(true);
    nm3.nodeHeartbeat(true);
    nm4.nodeHeartbeat(true);
    ((RMNodeImpl) rmNode1)
        .setOpportunisticContainersStatus(getOppurtunisticStatus(-1, 100));
    ((RMNodeImpl) rmNode2)
        .setOpportunisticContainersStatus(getOppurtunisticStatus(-1, 100));
    ((RMNodeImpl) rmNode3)
        .setOpportunisticContainersStatus(getOppurtunisticStatus(-1, 100));
    ((RMNodeImpl) rmNode4)
        .setOpportunisticContainersStatus(getOppurtunisticStatus(-1, 100));
    OpportunisticContainerContext ctxt = ((CapacityScheduler) scheduler)
        .getApplicationAttempt(attemptId).getOpportunisticContainerContext();
    // Send add and update node events to AM Service.
    amservice.handle(new NodeAddedSchedulerEvent(rmNode1));
    amservice.handle(new NodeAddedSchedulerEvent(rmNode2));
    amservice.handle(new NodeAddedSchedulerEvent(rmNode3));
    amservice.handle(new NodeAddedSchedulerEvent(rmNode4));
    amservice.handle(new NodeUpdateSchedulerEvent(rmNode1));
    amservice.handle(new NodeUpdateSchedulerEvent(rmNode2));
    amservice.handle(new NodeUpdateSchedulerEvent(rmNode3));
    amservice.handle(new NodeUpdateSchedulerEvent(rmNode4));
    // All nodes 1 - 4 will be applicable for scheduling.
    nm1.nodeHeartbeat(true);
    nm2.nodeHeartbeat(true);
    nm3.nodeHeartbeat(true);
    nm4.nodeHeartbeat(true);
    Thread.sleep(1000);
    QueueMetrics metrics = ((CapacityScheduler) scheduler).getRootQueue()
        .getMetrics();
    // Verify Metrics
    verifyMetrics(metrics, 15360, 15, 1024, 1, 1);
    AllocateResponse allocateResponse = am1.allocate(
        Arrays.asList(ResourceRequest.newInstance(Priority.newInstance(1),
            "*", Resources.createResource(1 * GB), 2, true, null,
            ExecutionTypeRequest.newInstance(
                ExecutionType.OPPORTUNISTIC, true))),
        null);
    List<Container> allocatedContainers = allocateResponse
        .getAllocatedContainers();
    Assert.assertEquals(2, allocatedContainers.size());
    Container container = allocatedContainers.get(0);
    MockNM allocNode = nodes.get(container.getNodeId());
    MockNM sameHostDiffNode = null;
    for (NodeId n : nodes.keySet()) {
      if (n.getHost().equals(allocNode.getNodeId().getHost()) &&
          n.getPort() != allocNode.getNodeId().getPort()) {
        sameHostDiffNode = nodes.get(n);
      }
    }
    // Verify Metrics After OPP allocation (Nothing should change)
    verifyMetrics(metrics, 15360, 15, 1024, 1, 1);
    am1.sendContainerUpdateRequest(
        Arrays.asList(UpdateContainerRequest.newInstance(0,
            container.getId(), ContainerUpdateType.PROMOTE_EXECUTION_TYPE,
            null, ExecutionType.GUARANTEED)));
    // Node on same host should not result in allocation
    sameHostDiffNode.nodeHeartbeat(true);
    Thread.sleep(200);
    allocateResponse =  am1.allocate(new ArrayList<>(), new ArrayList<>());
    Assert.assertEquals(0, allocateResponse.getUpdatedContainers().size());
    // Verify Metrics After OPP allocation (Nothing should change again)
    verifyMetrics(metrics, 15360, 15, 1024, 1, 1);
    // Send Promotion req again... this should result in update error
    allocateResponse = am1.sendContainerUpdateRequest(
        Arrays.asList(UpdateContainerRequest.newInstance(0,
            container.getId(), ContainerUpdateType.PROMOTE_EXECUTION_TYPE,
            null, ExecutionType.GUARANTEED)));
    Assert.assertEquals(0, allocateResponse.getUpdatedContainers().size());
    Assert.assertEquals(1, allocateResponse.getUpdateErrors().size());
    Assert.assertEquals("UPDATE_OUTSTANDING_ERROR",
        allocateResponse.getUpdateErrors().get(0).getReason());
    Assert.assertEquals(container.getId(),
        allocateResponse.getUpdateErrors().get(0)
            .getUpdateContainerRequest().getContainerId());
    // Send Promotion req again with incorrect version...
    // this should also result in update error
    allocateResponse = am1.sendContainerUpdateRequest(
        Arrays.asList(UpdateContainerRequest.newInstance(1,
            container.getId(), ContainerUpdateType.PROMOTE_EXECUTION_TYPE,
            null, ExecutionType.GUARANTEED)));
    Assert.assertEquals(0, allocateResponse.getUpdatedContainers().size());
    Assert.assertEquals(1, allocateResponse.getUpdateErrors().size());
    Assert.assertEquals("INCORRECT_CONTAINER_VERSION_ERROR|1|0",
        allocateResponse.getUpdateErrors().get(0).getReason());
    Assert.assertEquals(container.getId(),
        allocateResponse.getUpdateErrors().get(0)
            .getUpdateContainerRequest().getContainerId());
    // Ensure after correct node heartbeats, we should get the allocation
    allocNode.nodeHeartbeat(true);
    Thread.sleep(200);
    allocateResponse =  am1.allocate(new ArrayList<>(), new ArrayList<>());
    Assert.assertEquals(1, allocateResponse.getUpdatedContainers().size());
    Container uc =
        allocateResponse.getUpdatedContainers().get(0).getContainer();
    Assert.assertEquals(ExecutionType.GUARANTEED, uc.getExecutionType());
    Assert.assertEquals(uc.getId(), container.getId());
    Assert.assertEquals(uc.getVersion(), container.getVersion() + 1);
    // Verify Metrics After OPP allocation :
    // Allocated cores+mem should have increased, available should decrease
    verifyMetrics(metrics, 14336, 14, 2048, 2, 2);
    nm1.nodeHeartbeat(true);
    nm2.nodeHeartbeat(true);
    nm3.nodeHeartbeat(true);
    nm4.nodeHeartbeat(true);
    Thread.sleep(200);
    // Verify that the container is still in ACQUIRED state wrt the RM.
    RMContainer rmContainer = ((CapacityScheduler) scheduler)
        .getApplicationAttempt(
        uc.getId().getApplicationAttemptId()).getRMContainer(uc.getId());
    Assert.assertEquals(RMContainerState.ACQUIRED, rmContainer.getState());
    // Now demote the container back..
    allocateResponse = am1.sendContainerUpdateRequest(
        Arrays.asList(UpdateContainerRequest.newInstance(uc.getVersion(),
            uc.getId(), ContainerUpdateType.DEMOTE_EXECUTION_TYPE,
            null, ExecutionType.OPPORTUNISTIC)));
    // This should happen in the same heartbeat..
    Assert.assertEquals(1, allocateResponse.getUpdatedContainers().size());
    uc = allocateResponse.getUpdatedContainers().get(0).getContainer();
    Assert.assertEquals(ExecutionType.OPPORTUNISTIC, uc.getExecutionType());
    Assert.assertEquals(uc.getId(), container.getId());
    Assert.assertEquals(uc.getVersion(), container.getVersion() + 2);
    // Verify Metrics After OPP allocation :
    // Everything should have reverted to what it was
    verifyMetrics(metrics, 15360, 15, 1024, 1, 1);
  }
  @Test(timeout = 60000)
  public void testContainerPromoteAfterContainerStart() throws Exception {
    HashMap<NodeId, MockNM> nodes = new HashMap<>();
    MockNM nm1 = new MockNM("h1:1234", 4096, rm.getResourceTrackerService());
    nodes.put(nm1.getNodeId(), nm1);
    MockNM nm2 = new MockNM("h2:1234", 4096, rm.getResourceTrackerService());
    nodes.put(nm2.getNodeId(), nm2);
    nm1.registerNode();
    nm2.registerNode();
    OpportunisticContainerAllocatorAMService amservice =
        (OpportunisticContainerAllocatorAMService) rm
            .getApplicationMasterService();
    RMApp app1 = rm.submitApp(1 * GB, "app", "user", null, "default");
    ApplicationAttemptId attemptId =
        app1.getCurrentAppAttempt().getAppAttemptId();
    MockAM am1 = MockRM.launchAndRegisterAM(app1, rm, nm2);
    ResourceScheduler scheduler = rm.getResourceScheduler();
    RMNode rmNode1 = rm.getRMContext().getRMNodes().get(nm1.getNodeId());
    RMNode rmNode2 = rm.getRMContext().getRMNodes().get(nm2.getNodeId());
    nm1.nodeHeartbeat(true);
    nm2.nodeHeartbeat(true);
    ((RMNodeImpl) rmNode1)
        .setOpportunisticContainersStatus(getOppurtunisticStatus(-1, 100));
    ((RMNodeImpl) rmNode2)
        .setOpportunisticContainersStatus(getOppurtunisticStatus(-1, 100));
    OpportunisticContainerContext ctxt = ((CapacityScheduler) scheduler)
        .getApplicationAttempt(attemptId).getOpportunisticContainerContext();
    // Send add and update node events to AM Service.
    amservice.handle(new NodeAddedSchedulerEvent(rmNode1));
    amservice.handle(new NodeAddedSchedulerEvent(rmNode2));
    amservice.handle(new NodeUpdateSchedulerEvent(rmNode1));
    amservice.handle(new NodeUpdateSchedulerEvent(rmNode2));
    // All nodes 1 to 2 will be applicable for scheduling.
    nm1.nodeHeartbeat(true);
    nm2.nodeHeartbeat(true);
    Thread.sleep(1000);
    QueueMetrics metrics = ((CapacityScheduler) scheduler).getRootQueue()
        .getMetrics();
    // Verify Metrics
    verifyMetrics(metrics, 7168, 7, 1024, 1, 1);
    AllocateResponse allocateResponse = am1.allocate(
        Arrays.asList(ResourceRequest.newInstance(Priority.newInstance(1),
            "*", Resources.createResource(1 * GB), 2, true, null,
            ExecutionTypeRequest.newInstance(
                ExecutionType.OPPORTUNISTIC, true))),
        null);
    List<Container> allocatedContainers = allocateResponse
        .getAllocatedContainers();
    Assert.assertEquals(2, allocatedContainers.size());
    Container container = allocatedContainers.get(0);
    MockNM allocNode = nodes.get(container.getNodeId());
    // Start Container in NM
    allocNode.nodeHeartbeat(Arrays.asList(
        ContainerStatus.newInstance(container.getId(),
            ExecutionType.OPPORTUNISTIC, ContainerState.RUNNING, "", 0)),
        true);
    Thread.sleep(200);
    // Verify that container is actually running wrt the RM..
    RMContainer rmContainer = ((CapacityScheduler) scheduler)
        .getApplicationAttempt(
            container.getId().getApplicationAttemptId()).getRMContainer(
            container.getId());
    Assert.assertEquals(RMContainerState.RUNNING, rmContainer.getState());
    // Verify Metrics After OPP allocation (Nothing should change)
    verifyMetrics(metrics, 7168, 7, 1024, 1, 1);
    am1.sendContainerUpdateRequest(
        Arrays.asList(UpdateContainerRequest.newInstance(0,
            container.getId(), ContainerUpdateType.PROMOTE_EXECUTION_TYPE,
            null, ExecutionType.GUARANTEED)));
    // Verify Metrics After OPP allocation (Nothing should change again)
    verifyMetrics(metrics, 7168, 7, 1024, 1, 1);
    // Send the promotion request again... this should result in an update error
    allocateResponse = am1.sendContainerUpdateRequest(
        Arrays.asList(UpdateContainerRequest.newInstance(0,
            container.getId(), ContainerUpdateType.PROMOTE_EXECUTION_TYPE,
            null, ExecutionType.GUARANTEED)));
    Assert.assertEquals(0, allocateResponse.getUpdatedContainers().size());
    Assert.assertEquals(1, allocateResponse.getUpdateErrors().size());
    Assert.assertEquals("UPDATE_OUTSTANDING_ERROR",
        allocateResponse.getUpdateErrors().get(0).getReason());
    Assert.assertEquals(container.getId(),
        allocateResponse.getUpdateErrors().get(0)
            .getUpdateContainerRequest().getContainerId());
    // Start Container in NM
    allocNode.nodeHeartbeat(Arrays.asList(
        ContainerStatus.newInstance(container.getId(),
            ExecutionType.OPPORTUNISTIC, ContainerState.RUNNING, "", 0)),
        true);
    Thread.sleep(200);
    allocateResponse = am1.allocate(new ArrayList<>(), new ArrayList<>());
    Assert.assertEquals(1, allocateResponse.getUpdatedContainers().size());
    Container uc =
        allocateResponse.getUpdatedContainers().get(0).getContainer();
    Assert.assertEquals(ExecutionType.GUARANTEED, uc.getExecutionType());
    Assert.assertEquals(uc.getId(), container.getId());
    Assert.assertEquals(uc.getVersion(), container.getVersion() + 1);
    // Verify that the Container is still in RUNNING state wrt RM..
    rmContainer = ((CapacityScheduler) scheduler)
        .getApplicationAttempt(
            uc.getId().getApplicationAttemptId()).getRMContainer(uc.getId());
    Assert.assertEquals(RMContainerState.RUNNING, rmContainer.getState());
    // Verify Metrics After OPP allocation :
    // Allocated cores+mem should have increased, available should decrease
    verifyMetrics(metrics, 6144, 6, 2048, 2, 2);
  }
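  // Verifies that a promotion request for a container that has already
  // completed is rejected with INVALID_CONTAINER_ID and leaves the queue
  // metrics unchanged.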
  @Test(timeout = 600000)
  public void testContainerPromoteAfterContainerComplete() throws Exception {
    HashMap<NodeId, MockNM> nodes = new HashMap<>();
    MockNM nm1 = new MockNM("h1:1234", 4096, rm.getResourceTrackerService());
    nodes.put(nm1.getNodeId(), nm1);
    MockNM nm2 = new MockNM("h2:1234", 4096, rm.getResourceTrackerService());
    nodes.put(nm2.getNodeId(), nm2);
    nm1.registerNode();
    nm2.registerNode();
    OpportunisticContainerAllocatorAMService amservice =
        (OpportunisticContainerAllocatorAMService) rm
            .getApplicationMasterService();
    RMApp app1 = rm.submitApp(1 * GB, "app", "user", null, "default");
    ApplicationAttemptId attemptId =
        app1.getCurrentAppAttempt().getAppAttemptId();
    MockAM am1 = MockRM.launchAndRegisterAM(app1, rm, nm2);
    ResourceScheduler scheduler = rm.getResourceScheduler();
    RMNode rmNode1 = rm.getRMContext().getRMNodes().get(nm1.getNodeId());
    RMNode rmNode2 = rm.getRMContext().getRMNodes().get(nm2.getNodeId());
    nm1.nodeHeartbeat(true);
    nm2.nodeHeartbeat(true);
    ((RMNodeImpl) rmNode1)
        .setOpportunisticContainersStatus(getOppurtunisticStatus(-1, 100));
    ((RMNodeImpl) rmNode2)
        .setOpportunisticContainersStatus(getOppurtunisticStatus(-1, 100));
    OpportunisticContainerContext ctxt = ((CapacityScheduler) scheduler)
        .getApplicationAttempt(attemptId).getOpportunisticContainerContext();
    // Send add and update node events to AM Service.
    amservice.handle(new NodeAddedSchedulerEvent(rmNode1));
    amservice.handle(new NodeAddedSchedulerEvent(rmNode2));
    amservice.handle(new NodeUpdateSchedulerEvent(rmNode1));
    amservice.handle(new NodeUpdateSchedulerEvent(rmNode2));
    // Both node 1 and node 2 will be applicable for scheduling.
    nm1.nodeHeartbeat(true);
    nm2.nodeHeartbeat(true);
    Thread.sleep(1000);
    QueueMetrics metrics = ((CapacityScheduler) scheduler).getRootQueue()
        .getMetrics();
    // Verify Metrics
    verifyMetrics(metrics, 7168, 7, 1024, 1, 1);
    AllocateResponse allocateResponse = am1.allocate(
        Arrays.asList(ResourceRequest.newInstance(Priority.newInstance(1),
            "*", Resources.createResource(1 * GB), 2, true, null,
            ExecutionTypeRequest.newInstance(
                ExecutionType.OPPORTUNISTIC, true))),
        null);
    List<Container> allocatedContainers = allocateResponse
        .getAllocatedContainers();
    Assert.assertEquals(2, allocatedContainers.size());
    Container container = allocatedContainers.get(0);
    MockNM allocNode = nodes.get(container.getNodeId());
    // Start Container in NM
    allocNode.nodeHeartbeat(Arrays.asList(
        ContainerStatus.newInstance(container.getId(),
            ExecutionType.OPPORTUNISTIC, ContainerState.RUNNING, "", 0)),
        true);
    Thread.sleep(200);
    // Verify that container is actually running wrt the RM..
    RMContainer rmContainer = ((CapacityScheduler) scheduler)
        .getApplicationAttempt(
            container.getId().getApplicationAttemptId()).getRMContainer(
            container.getId());
    Assert.assertEquals(RMContainerState.RUNNING, rmContainer.getState());
    // Container Completed in the NM
    allocNode.nodeHeartbeat(Arrays.asList(
        ContainerStatus.newInstance(container.getId(),
            ExecutionType.OPPORTUNISTIC, ContainerState.COMPLETE, "", 0)),
        true);
    Thread.sleep(200);
    // Verify that container has been removed..
    rmContainer = ((CapacityScheduler) scheduler)
        .getApplicationAttempt(
            container.getId().getApplicationAttemptId()).getRMContainer(
            container.getId());
    Assert.assertNull(rmContainer);
    // Verify Metrics After OPP allocation (Nothing should change)
    verifyMetrics(metrics, 7168, 7, 1024, 1, 1);
    // Send a promotion request... this should result in an update error
    // since the container doesn't exist anymore.
    allocateResponse = am1.sendContainerUpdateRequest(
        Arrays.asList(UpdateContainerRequest.newInstance(0,
            container.getId(), ContainerUpdateType.PROMOTE_EXECUTION_TYPE,
            null, ExecutionType.GUARANTEED)));
    Assert.assertEquals(1,
        allocateResponse.getCompletedContainersStatuses().size());
    Assert.assertEquals(container.getId(),
        allocateResponse.getCompletedContainersStatuses().get(0)
            .getContainerId());
    Assert.assertEquals(0, allocateResponse.getUpdatedContainers().size());
    Assert.assertEquals(1, allocateResponse.getUpdateErrors().size());
    Assert.assertEquals("INVALID_CONTAINER_ID",
        allocateResponse.getUpdateErrors().get(0).getReason());
    Assert.assertEquals(container.getId(),
        allocateResponse.getUpdateErrors().get(0)
            .getUpdateContainerRequest().getContainerId());
    // Verify Metrics After OPP allocation (Nothing should change again)
    verifyMetrics(metrics, 7168, 7, 1024, 1, 1);
  }
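  // Asserts the expected available/allocated memory, vcores and container
  // count against the given queue metrics.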
  private void verifyMetrics(QueueMetrics metrics, long availableMB,
      int availableVirtualCores, long allocatedMB,
      int allocatedVirtualCores, int allocatedContainers) {
    Assert.assertEquals(availableMB, metrics.getAvailableMB());
    Assert.assertEquals(availableVirtualCores, metrics.getAvailableVirtualCores());
    Assert.assertEquals(allocatedMB, metrics.getAllocatedMB());
    Assert.assertEquals(allocatedVirtualCores, metrics.getAllocatedVirtualCores());
    Assert.assertEquals(allocatedContainers, metrics.getAllocatedContainers());
  }
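  // Verifies that removing a node from the scheduler (but not from the AM
  // service) does not break subsequent allocate calls, and that the
  // opportunistic container context eventually drops the removed node.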
  @Test(timeout = 60000)
  public void testNodeRemovalDuringAllocate() throws Exception {
    MockNM nm1 = new MockNM("h1:1234", 4096, rm.getResourceTrackerService());
    MockNM nm2 = new MockNM("h2:1234", 4096, rm.getResourceTrackerService());
    nm1.registerNode();
    nm2.registerNode();
    OpportunisticContainerAllocatorAMService amservice =
        (OpportunisticContainerAllocatorAMService) rm
            .getApplicationMasterService();
    RMApp app1 = rm.submitApp(1 * GB, "app", "user", null, "default");
    ApplicationAttemptId attemptId =
        app1.getCurrentAppAttempt().getAppAttemptId();
    MockAM am1 = MockRM.launchAndRegisterAM(app1, rm, nm2);
    ResourceScheduler scheduler = rm.getResourceScheduler();
    RMNode rmNode1 = rm.getRMContext().getRMNodes().get(nm1.getNodeId());
    RMNode rmNode2 = rm.getRMContext().getRMNodes().get(nm2.getNodeId());
    nm1.nodeHeartbeat(true);
    nm2.nodeHeartbeat(true);
    ((RMNodeImpl) rmNode1)
        .setOpportunisticContainersStatus(getOppurtunisticStatus(-1, 100));
    ((RMNodeImpl) rmNode2)
        .setOpportunisticContainersStatus(getOppurtunisticStatus(-1, 100));
    OpportunisticContainerContext ctxt = ((CapacityScheduler) scheduler)
        .getApplicationAttempt(attemptId).getOpportunisticContainerContext();
    // Send add and update node events to AM Service.
    amservice.handle(new NodeAddedSchedulerEvent(rmNode1));
    amservice.handle(new NodeAddedSchedulerEvent(rmNode2));
    amservice.handle(new NodeUpdateSchedulerEvent(rmNode1));
    amservice.handle(new NodeUpdateSchedulerEvent(rmNode2));
    // Both node 1 and node 2 will be applicable for scheduling.
    for (int i = 0; i < 10; i++) {
      am1.allocate(
          Arrays.asList(ResourceRequest.newInstance(Priority.newInstance(1),
              "*", Resources.createResource(1 * GB), 2)),
          null);
      if (ctxt.getNodeMap().size() == 2) {
        break;
      }
      Thread.sleep(50);
    }
    Assert.assertEquals(2, ctxt.getNodeMap().size());
    // Remove node from scheduler but not from AM Service.
    scheduler.handle(new NodeRemovedSchedulerEvent(rmNode1));
    // After removal of node 1, only 1 node will be applicable for scheduling.
    for (int i = 0; i < 10; i++) {
      try {
        am1.allocate(
            Arrays.asList(ResourceRequest.newInstance(Priority.newInstance(1),
                "*", Resources.createResource(1 * GB), 2)),
            null);
      } catch (Exception e) {
        Assert.fail("Allocate request should be handled on node removal");
      }
      if (ctxt.getNodeMap().size() == 1) {
        break;
      }
      Thread.sleep(50);
    }
    Assert.assertEquals(1, ctxt.getNodeMap().size());
  }
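  // Builds a mocked OpportunisticContainersStatus with the given estimated
  // queue wait time and wait queue length.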
  private OpportunisticContainersStatus getOppurtunisticStatus(int waitTime,
      int queueLength) {
    OpportunisticContainersStatus status1 =
        Mockito.mock(OpportunisticContainersStatus.class);
    Mockito.when(status1.getEstimatedQueueWaitTime()).thenReturn(waitTime);
    Mockito.when(status1.getWaitQueueLength()).thenReturn(queueLength);
    return status1;
  }
  // Test if the OpportunisticContainerAllocatorAMService can handle both
  // DSProtocol as well as AMProtocol clients
  @Test
  public void testRPCWrapping() throws Exception {
    Configuration conf = new Configuration();
    conf.set(YarnConfiguration.IPC_RPC_IMPL, HadoopYarnProtoRPC.class
        .getName());
    YarnRPC rpc = YarnRPC.create(conf);
    String bindAddr = "localhost:0";
    InetSocketAddress addr = NetUtils.createSocketAddr(bindAddr);
    conf.setSocketAddr(YarnConfiguration.RM_SCHEDULER_ADDRESS, addr);
    final RecordFactory factory = RecordFactoryProvider.getRecordFactory(null);
    final RMContext rmContext = new RMContextImpl() {
      @Override
      public AMLivelinessMonitor getAMLivelinessMonitor() {
        return null;
      }
      @Override
      public Configuration getYarnConfiguration() {
        return new YarnConfiguration();
      }
      @Override
      public RMContainerTokenSecretManager getContainerTokenSecretManager() {
        return new RMContainerTokenSecretManager(conf);
      }
    };
    Container c = factory.newRecordInstance(Container.class);
    c.setExecutionType(ExecutionType.OPPORTUNISTIC);
    c.setId(
        ContainerId.newContainerId(
            ApplicationAttemptId.newInstance(
                ApplicationId.newInstance(12345, 1), 2), 3));
    AllocateRequest allReq =
        (AllocateRequestPBImpl)factory.newRecordInstance(AllocateRequest.class);
    allReq.setAskList(Arrays.asList(
        ResourceRequest.newInstance(Priority.UNDEFINED, "a",
            Resource.newInstance(1, 2), 1, true, "exp",
            ExecutionTypeRequest.newInstance(
                ExecutionType.OPPORTUNISTIC, true))));
    OpportunisticContainerAllocatorAMService service =
        createService(factory, rmContext, c);
    conf.setBoolean(YarnConfiguration.DIST_SCHEDULING_ENABLED, true);
    Server server = service.getServer(rpc, conf, addr, null);
    server.start();
    // Verify that the OpportunisticContainerAllocatorAMService can handle
    // vanilla ApplicationMasterProtocol clients
    RPC.setProtocolEngine(conf, ApplicationMasterProtocolPB.class,
        ProtobufRpcEngine.class);
    ApplicationMasterProtocolPB ampProxy =
        RPC.getProxy(ApplicationMasterProtocolPB
            .class, 1, NetUtils.getConnectAddress(server), conf);
    RegisterApplicationMasterResponse regResp =
        new RegisterApplicationMasterResponsePBImpl(
            ampProxy.registerApplicationMaster(null,
                ((RegisterApplicationMasterRequestPBImpl)factory
                    .newRecordInstance(
                        RegisterApplicationMasterRequest.class)).getProto()));
    Assert.assertEquals("dummyQueue", regResp.getQueue());
    FinishApplicationMasterResponse finishResp =
        new FinishApplicationMasterResponsePBImpl(
            ampProxy.finishApplicationMaster(null,
                ((FinishApplicationMasterRequestPBImpl)factory
                    .newRecordInstance(
                        FinishApplicationMasterRequest.class)).getProto()
            ));
    Assert.assertEquals(false, finishResp.getIsUnregistered());
    AllocateResponse allocResp =
        new AllocateResponsePBImpl(
            ampProxy.allocate(null,
                ((AllocateRequestPBImpl)factory
                    .newRecordInstance(AllocateRequest.class)).getProto())
        );
    List<Container> allocatedContainers = allocResp.getAllocatedContainers();
    Assert.assertEquals(1, allocatedContainers.size());
    Assert.assertEquals(ExecutionType.OPPORTUNISTIC,
        allocatedContainers.get(0).getExecutionType());
    Assert.assertEquals(12345, allocResp.getNumClusterNodes());
    // Verify that the distributed scheduling service can handle
    // DistributedSchedulingAMProtocol clients as well
    RPC.setProtocolEngine(conf, DistributedSchedulingAMProtocolPB.class,
        ProtobufRpcEngine.class);
    DistributedSchedulingAMProtocolPB dsProxy =
        RPC.getProxy(DistributedSchedulingAMProtocolPB
            .class, 1, NetUtils.getConnectAddress(server), conf);
    RegisterDistributedSchedulingAMResponse dsRegResp =
        new RegisterDistributedSchedulingAMResponsePBImpl(
            dsProxy.registerApplicationMasterForDistributedScheduling(null,
                ((RegisterApplicationMasterRequestPBImpl)factory
                    .newRecordInstance(RegisterApplicationMasterRequest.class))
                    .getProto()));
    Assert.assertEquals(54321L, dsRegResp.getContainerIdStart());
    Assert.assertEquals(4,
        dsRegResp.getMaxContainerResource().getVirtualCores());
    Assert.assertEquals(1024,
        dsRegResp.getMinContainerResource().getMemorySize());
    Assert.assertEquals(2,
        dsRegResp.getIncrContainerResource().getVirtualCores());
    DistributedSchedulingAllocateRequestPBImpl distAllReq =
        (DistributedSchedulingAllocateRequestPBImpl)factory.newRecordInstance(
            DistributedSchedulingAllocateRequest.class);
    distAllReq.setAllocateRequest(allReq);
    distAllReq.setAllocatedContainers(Arrays.asList(c));
    DistributedSchedulingAllocateResponse dsAllocResp =
        new DistributedSchedulingAllocateResponsePBImpl(
            dsProxy.allocateForDistributedScheduling(null,
                distAllReq.getProto()));
    Assert.assertEquals(
        "h1", dsAllocResp.getNodesForScheduling().get(0).getNodeId().getHost());
    FinishApplicationMasterResponse dsfinishResp =
        new FinishApplicationMasterResponsePBImpl(
            dsProxy.finishApplicationMaster(null,
                ((FinishApplicationMasterRequestPBImpl) factory
                    .newRecordInstance(FinishApplicationMasterRequest.class))
                    .getProto()));
    Assert.assertEquals(
        false, dsfinishResp.getIsUnregistered());
  }
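  // Creates an OpportunisticContainerAllocatorAMService whose protocol
  // methods are stubbed to return fixed, recognizable values, so that
  // testRPCWrapping can assert the responses travelled through both RPC
  // protocols intact.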
  private OpportunisticContainerAllocatorAMService createService(
      final RecordFactory factory, final RMContext rmContext,
      final Container c) {
    return new OpportunisticContainerAllocatorAMService(rmContext, null) {
      @Override
      public RegisterApplicationMasterResponse registerApplicationMaster(
          RegisterApplicationMasterRequest request) throws
          YarnException, IOException {
        RegisterApplicationMasterResponse resp = factory.newRecordInstance(
            RegisterApplicationMasterResponse.class);
        // Dummy Entry to Assert that we get this object back
        resp.setQueue("dummyQueue");
        return resp;
      }
      @Override
      public FinishApplicationMasterResponse finishApplicationMaster(
          FinishApplicationMasterRequest request) throws YarnException,
          IOException {
        FinishApplicationMasterResponse resp = factory.newRecordInstance(
            FinishApplicationMasterResponse.class);
        // Dummy Entry to Assert that we get this object back
        resp.setIsUnregistered(false);
        return resp;
      }
      @Override
      public AllocateResponse allocate(AllocateRequest request) throws
          YarnException, IOException {
        AllocateResponse response = factory.newRecordInstance(
            AllocateResponse.class);
        response.setNumClusterNodes(12345);
        response.setAllocatedContainers(Arrays.asList(c));
        return response;
      }
      @Override
      public RegisterDistributedSchedulingAMResponse
          registerApplicationMasterForDistributedScheduling(
          RegisterApplicationMasterRequest request)
          throws YarnException, IOException {
        RegisterDistributedSchedulingAMResponse resp = factory
            .newRecordInstance(RegisterDistributedSchedulingAMResponse.class);
        resp.setContainerIdStart(54321L);
        resp.setMaxContainerResource(Resource.newInstance(4096, 4));
        resp.setMinContainerResource(Resource.newInstance(1024, 1));
        resp.setIncrContainerResource(Resource.newInstance(2048, 2));
        return resp;
      }
      @Override
      public DistributedSchedulingAllocateResponse
          allocateForDistributedScheduling(
          DistributedSchedulingAllocateRequest request)
          throws YarnException, IOException {
        List<ResourceRequest> askList =
            request.getAllocateRequest().getAskList();
        List<Container> allocatedContainers = request.getAllocatedContainers();
        Assert.assertEquals(1, allocatedContainers.size());
        Assert.assertEquals(ExecutionType.OPPORTUNISTIC,
            allocatedContainers.get(0).getExecutionType());
        Assert.assertEquals(1, askList.size());
        Assert.assertTrue(askList.get(0)
            .getExecutionTypeRequest().getEnforceExecutionType());
        DistributedSchedulingAllocateResponse resp = factory
            .newRecordInstance(DistributedSchedulingAllocateResponse.class);
        resp.setNodesForScheduling(
            Arrays.asList(RemoteNode.newInstance(
                NodeId.newInstance("h1", 1234), "http://h1:4321")));
        return resp;
      }
    };
  }
}
 
| apache_hadoop | 2017-01-28 | 312b36d113d83640b92c62fdd91ede74bd04c00f |

| hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ReadWriteDiskValidatorMetrics.java | 170 |
	/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.util;
import com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.metrics2.MetricsInfo;
import org.apache.hadoop.metrics2.MetricsSystem;
import org.apache.hadoop.metrics2.annotation.Metric;
import org.apache.hadoop.metrics2.lib.*;
import java.util.HashMap;
import java.util.Map;
import static org.apache.hadoop.metrics2.lib.Interns.info;
/**
 * The metrics for a directory generated by {@link ReadWriteDiskValidator}.
 */
@InterfaceAudience.Private
public class ReadWriteDiskValidatorMetrics {
  @Metric("# of disk failure") MutableCounterInt failureCount;
  @Metric("Time of last failure") MutableGaugeLong lastFailureTime;
  private final MetricsRegistry registry;
  private static final MetricsInfo RECORD_INFO =
      info("ReadWriteDiskValidatorMetrics", "Metrics for the DiskValidator");
  private final int[] quantileIntervals = new int[] {
      60 * 60, // 1h
      24 * 60 * 60, // 1 day
      10 * 24 * 60 * 60 // 10 days
  };
  private final MutableQuantiles[] fileReadQuantiles;
  private final MutableQuantiles[] fileWriteQuantiles;
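  /**
   * Creates the metrics source and registers read and write latency
   * quantiles for each configured interval (1 hour, 1 day and 10 days).
   */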
  public ReadWriteDiskValidatorMetrics() {
    registry = new MetricsRegistry(RECORD_INFO);
    fileReadQuantiles = new MutableQuantiles[quantileIntervals.length];
    for (int i = 0; i < fileReadQuantiles.length; i++) {
      int interval = quantileIntervals[i];
      fileReadQuantiles[i] = registry.newQuantiles(
          "readLatency" + interval + "s",
          "File read latency", "Ops", "latencyMicros", interval);
    }
    fileWriteQuantiles = new MutableQuantiles[quantileIntervals.length];
    for (int i = 0; i < fileWriteQuantiles.length; i++) {
      int interval = quantileIntervals[i];
      fileWriteQuantiles[i] = registry.newQuantiles(
          "writeLatency" + interval + "s",
          "File write latency", "Ops", "latencyMicros", interval);
    }
  }
  /**
   * Simple metrics cache to prevent re-registrations and to provide access
   * to existing metrics.
   */
  protected final static Map<String, ReadWriteDiskValidatorMetrics> DIR_METRICS
      = new HashMap<>();
  /**
   * Get a metric by given directory name.
   *
   * @param dirName directory name
   * @return the metric
   */
  public synchronized static ReadWriteDiskValidatorMetrics getMetric(
      String dirName) {
    MetricsSystem ms = DefaultMetricsSystem.instance();
    ReadWriteDiskValidatorMetrics metrics = DIR_METRICS.get(dirName);
    if (metrics == null) {
      metrics = new ReadWriteDiskValidatorMetrics();
      // Register with the MetricsSystem
      if (ms != null) {
        metrics = ms.register(sourceName(dirName),
            "Metrics for directory: " + dirName, metrics);
      }
      DIR_METRICS.put(dirName, metrics);
    }
    return metrics;
  }
  /**
   * Add the file write latency to {@link MutableQuantiles} metrics.
   *
   * @param writeLatency file write latency in microseconds
   */
  public void addWriteFileLatency(long writeLatency) {
    if (fileWriteQuantiles != null) {
      for (MutableQuantiles q : fileWriteQuantiles) {
        q.add(writeLatency);
      }
    }
  }
  /**
   * Add the file read latency to {@link MutableQuantiles} metrics.
   *
   * @param readLatency file read latency in microseconds
   */
  public void addReadFileLatency(long readLatency) {
    if (fileReadQuantiles != null) {
      for (MutableQuantiles q : fileReadQuantiles) {
        q.add(readLatency);
      }
    }
  }
  /**
   * Get a source name by given directory name.
   *
   * @param dirName directory name
   * @return the source name
   */
  protected static String sourceName(String dirName) {
    StringBuilder sb = new StringBuilder(RECORD_INFO.name());
    sb.append(",dir=").append(dirName);
    return sb.toString();
  }
  /**
   * Increase the failure count and update the last failure timestamp.
   */
  public void diskCheckFailed() {
    failureCount.incr();
    lastFailureTime.set(System.nanoTime());
  }
  /**
   * Get {@link MutableQuantiles} metrics for the file read time.
   *
   * @return {@link MutableQuantiles} metrics for the file read time
   */
  @VisibleForTesting
  protected MutableQuantiles[] getFileReadQuantiles() {
    return fileReadQuantiles;
  }
  /**
   * Get {@link MutableQuantiles} metrics for the file write time.
   *
   * @return {@link MutableQuantiles} metrics for the file write time
   */
  @VisibleForTesting
  protected MutableQuantiles[] getFileWriteQuantiles() {
    return fileWriteQuantiles;
  }
}
 
| apache_hadoop | 2017-01-28 | 312b36d113d83640b92c62fdd91ede74bd04c00f |

| hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/QueuePriorityContainerCandidateSelector.java | 510 |
	/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity;
import com.google.common.collect.HashBasedTable;
import com.google.common.collect.Table;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceRequest;
import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode;
import org.apache.hadoop.yarn.util.resource.Resources;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
public class QueuePriorityContainerCandidateSelector
    extends PreemptionCandidatesSelector {
  private static final Log LOG =
      LogFactory.getLog(QueuePriorityContainerCandidateSelector.class);
  // Configured timeout before doing reserved container preemption
  private long minTimeout;
  // Allow moving reservations around for better placement?
  private boolean allowMoveReservation;
  // All reserved containers in the system that could possibly preempt from
  // queues with lower priorities
  private List<RMContainer> reservedContainers;
  // From -> To
  // A digraph to represent if one queue has higher priority than another.
  // For example, a->b means queue=a has higher priority than queue=b
  private Table<String, String, Boolean> priorityDigraph =
      HashBasedTable.create();
  private Resource clusterResource;
  private Map<ApplicationAttemptId, Set<RMContainer>> selectedCandidates;
  private Resource totalPreemptionAllowed;
  // A cached scheduler node map, will be refreshed each round.
  private Map<NodeId, TempSchedulerNode> tempSchedulerNodeMap = new HashMap<>();
  // Nodes we have touched (made changes to) in this round.
  // Once a node is touched, we will not try to move reservations to it.
  private Set<NodeId> touchedNodes;
  // Resources marked to be preempted from other queues.
  // <Queue, Partition, Resource-marked-to-be-preempted-from-other-queue>
  private Table<String, String, Resource> toPreemptedFromOtherQueues =
      HashBasedTable.create();
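  // Orders containers so that a container whose queue can preempt from the
  // other container's queue comes first; when neither queue can preempt the
  // other, older containers (earlier creation time) come first.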
  private final Comparator<RMContainer>
      CONTAINER_CREATION_TIME_COMPARATOR = new Comparator<RMContainer>() {
    @Override
    public int compare(RMContainer o1, RMContainer o2) {
      if (preemptionAllowed(o1.getQueueName(), o2.getQueueName())) {
        return -1;
      } else if (preemptionAllowed(o2.getQueueName(), o1.getQueueName())) {
        return 1;
      }
      // If two queues cannot preempt each other, compare creation time.
      return Long.compare(o1.getCreationTime(), o2.getCreationTime());
    }
  };
  QueuePriorityContainerCandidateSelector(
      CapacitySchedulerPreemptionContext preemptionContext) {
    super(preemptionContext);
    // Initialize parameters
    CapacitySchedulerConfiguration csc =
        preemptionContext.getScheduler().getConfiguration();
    minTimeout = csc.getPUOrderingPolicyUnderUtilizedPreemptionDelay();
    allowMoveReservation =
        csc.getPUOrderingPolicyUnderUtilizedPreemptionMoveReservation();
  }
  private List<TempQueuePerPartition> getPathToRoot(TempQueuePerPartition tq) {
    List<TempQueuePerPartition> list = new ArrayList<>();
    while (tq != null) {
      list.add(tq);
      tq = tq.parent;
    }
    return list;
  }
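  /**
   * Builds the priority digraph over all leaf queue pairs: for each pair, walk
   * both paths to the root, find the direct ancestors just below their lowest
   * common ancestor, and add an edge from the higher-priority queue to the
   * lower-priority one.
   */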
  private void intializePriorityDigraph() {
    LOG.info("Initializing priority preemption directed graph:");
    // Make sure we iterate all leaf queue combinations
    for (String q1 : preemptionContext.getLeafQueueNames()) {
      for (String q2 : preemptionContext.getLeafQueueNames()) {
        // Make sure we only calculate each combination once instead of all
        // permutations
        if (q1.compareTo(q2) < 0) {
          TempQueuePerPartition tq1 = preemptionContext.getQueueByPartition(q1,
              RMNodeLabelsManager.NO_LABEL);
          TempQueuePerPartition tq2 = preemptionContext.getQueueByPartition(q2,
              RMNodeLabelsManager.NO_LABEL);
          List<TempQueuePerPartition> path1 = getPathToRoot(tq1);
          List<TempQueuePerPartition> path2 = getPathToRoot(tq2);
          // Get direct ancestor below LCA (Lowest common ancestor)
          int i = path1.size() - 1;
          int j = path2.size() - 1;
          while (path1.get(i).queueName.equals(path2.get(j).queueName)) {
            i--;
            j--;
          }
          // compare priority of path1[i] and path2[j]
          int p1 = path1.get(i).relativePriority;
          int p2 = path2.get(j).relativePriority;
          if (p1 < p2) {
            priorityDigraph.put(q2, q1, true);
            if (LOG.isDebugEnabled()) {
              LOG.info("- Added priority ordering edge: " + q2 + " >> " + q1);
            }
          } else if (p2 < p1) {
            priorityDigraph.put(q1, q2, true);
            if (LOG.isDebugEnabled()) {
              LOG.info("- Added priority ordering edge: " + q1 + " >> " + q2);
            }
          }
        }
      }
    }
  }
  /**
   * Do we allow demandingQueue to preempt resources from toBePreemptedQueue?
   *
   * @param demandingQueue the queue demanding the resources
   * @param toBePreemptedQueue the queue to preempt resources from
   * @return true if preemption is allowed, false otherwise
   */
  private boolean preemptionAllowed(String demandingQueue,
      String toBePreemptedQueue) {
    return priorityDigraph.contains(demandingQueue,
        toBePreemptedQueue);
  }
  /**
   * Can we preempt enough resources on the given node for the asked resource?
   *
   * @param requiredResource the asked resource
   * @param demandingQueue the queue demanding the resources
   * @param schedulerNode the node to preempt from
   * @param lookingForNewReservationPlacement whether we are trying to move a
   *        reservation to this node
   * @param newlySelectedContainers newly selected containers, will be set when
   *        we can preempt enough resources from the node.
   *
   * @return true if enough resources can be preempted, false otherwise
   */
  private boolean canPreemptEnoughResourceForAsked(Resource requiredResource,
      String demandingQueue, FiCaSchedulerNode schedulerNode,
      boolean lookingForNewReservationPlacement,
      List<RMContainer> newlySelectedContainers) {
    // Do not check touched nodes again.
    if (touchedNodes.contains(schedulerNode.getNodeID())) {
      return false;
    }
    TempSchedulerNode node = tempSchedulerNodeMap.get(schedulerNode.getNodeID());
    if (null == node) {
      node = TempSchedulerNode.fromSchedulerNode(schedulerNode);
      tempSchedulerNodeMap.put(schedulerNode.getNodeID(), node);
    }
    if (null != schedulerNode.getReservedContainer()
        && lookingForNewReservationPlacement) {
      // Node is already reserved by another container; skip it.
      // We will not try to move a reservation to a node that already has one.
      return false;
    }
    // Needed preemption = asked - (node.total - node.allocated)
    Resource lacking = Resources.subtract(requiredResource, Resources
        .subtract(node.getTotalResource(), node.getAllocatedResource()));
    // On each host, simply check if we could preempt containers from
    // lower-prioritized queues or not
    List<RMContainer> runningContainers = node.getRunningContainers();
    Collections.sort(runningContainers, CONTAINER_CREATION_TIME_COMPARATOR);
    // First of all, consider already selected containers
    for (RMContainer runningContainer : runningContainers) {
      if (CapacitySchedulerPreemptionUtils.isContainerAlreadySelected(
          runningContainer, selectedCandidates)) {
        Resources.subtractFrom(lacking,
            runningContainer.getAllocatedResource());
      }
    }
    // If we already can allocate the reserved container after preemption,
    // skip following steps
    if (Resources.fitsIn(rc, clusterResource, lacking,
        Resources.none())) {
      return true;
    }
    Resource allowed = Resources.clone(totalPreemptionAllowed);
    Resource selected = Resources.createResource(0);
    for (RMContainer runningContainer : runningContainers) {
      if (CapacitySchedulerPreemptionUtils.isContainerAlreadySelected(
          runningContainer, selectedCandidates)) {
        // ignore selected containers
        continue;
      }
      // Only preempt resource from queue with lower priority
      if (!preemptionAllowed(demandingQueue,
          runningContainer.getQueueName())) {
        continue;
      }
      // Don't preempt AM container
      if (runningContainer.isAMContainer()) {
        continue;
      }
      // Do not allow preempting more than the limit
      if (Resources.greaterThanOrEqual(rc, clusterResource, allowed,
          runningContainer.getAllocatedResource())) {
        Resources.subtractFrom(allowed,
            runningContainer.getAllocatedResource());
        Resources.subtractFrom(lacking,
            runningContainer.getAllocatedResource());
        Resources.addTo(selected, runningContainer.getAllocatedResource());
        if (null != newlySelectedContainers) {
          newlySelectedContainers.add(runningContainer);
        }
      }
      // Lacking <= 0 means we can allocate the reserved container
      if (Resources.fitsIn(rc, clusterResource, lacking, Resources.none())) {
        return true;
      }
    }
    return false;
  }
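  /**
   * Pre-checks before moving a reserved container to a new node: skip
   * container update requests, hard-locality requests, and nodes whose
   * partition does not match the reservation's node label expression.
   */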
  private boolean preChecksForMovingReservedContainerToNode(
      RMContainer reservedContainer, FiCaSchedulerNode newNode) {
    // Don't move the reservation if it is tied to a container update request
    if (reservedContainer.getReservedSchedulerKey().getContainerToUpdate()
        != null) {
      // This means a container update request (like increase / promote)
      return false;
    }
    // For normal requests
    FiCaSchedulerApp app =
        preemptionContext.getScheduler().getApplicationAttempt(
            reservedContainer.getApplicationAttemptId());
    if (!app.getAppSchedulingInfo().canDelayTo(
        reservedContainer.getAllocatedSchedulerKey(), ResourceRequest.ANY)) {
      // This is a hard locality request
      return false;
    }
    // Check if newNode's partition matches requested partition
    if (!StringUtils.equals(reservedContainer.getNodeLabelExpression(),
        newNode.getPartition())) {
      return false;
    }
    return true;
  }
  private void tryToMakeBetterReservationPlacement(
      RMContainer reservedContainer,
      List<FiCaSchedulerNode> allSchedulerNodes) {
    for (FiCaSchedulerNode targetNode : allSchedulerNodes) {
      // Precheck if we can move the rmContainer to the new targetNode
      if (!preChecksForMovingReservedContainerToNode(reservedContainer,
          targetNode)) {
        continue;
      }
      if (canPreemptEnoughResourceForAsked(
          reservedContainer.getReservedResource(),
          reservedContainer.getQueueName(), targetNode, true, null)) {
        NodeId fromNode = reservedContainer.getNodeId();
        // We can place the container on this targetNode, so just go ahead and
        // notify the scheduler
        if (preemptionContext.getScheduler().moveReservedContainer(
            reservedContainer, targetNode)) {
          LOG.info("Successfully moved reserved container=" + reservedContainer
              .getContainerId() + " from targetNode=" + fromNode
              + " to targetNode=" + targetNode.getNodeID());
          touchedNodes.add(targetNode.getNodeID());
        }
      }
    }
  }
  /**
   * Do we allow the demanding queue to preempt resources from other queues?
   * A satisfied queue is not allowed to preempt resources from other queues.
   *
   * @param demandingQueue the queue demanding the resources
   * @param partition the node partition
   * @return true if preemption is allowed, false otherwise
   */
  private boolean isQueueSatisfied(String demandingQueue,
      String partition) {
    TempQueuePerPartition tq = preemptionContext.getQueueByPartition(
        demandingQueue, partition);
    if (null == tq) {
      return false;
    }
    Resource guaranteed = tq.getGuaranteed();
    Resource usedDeductReservd = Resources.subtract(tq.getUsed(),
        tq.getReserved());
    Resource markedToPreemptFromOtherQueue = toPreemptedFromOtherQueues.get(
        demandingQueue, partition);
    if (null == markedToPreemptFromOtherQueue) {
      markedToPreemptFromOtherQueue = Resources.none();
    }
    // return Used - reserved + to-preempt-from-other-queue >= guaranteed
    boolean flag = Resources.greaterThanOrEqual(rc, clusterResource,
        Resources.add(usedDeductReservd, markedToPreemptFromOtherQueue),
        guaranteed);
    return flag;
  }
  private void incToPreempt(String queue, String partition,
      Resource allocated) {
    Resource total = toPreemptedFromOtherQueues.get(queue, partition);
    if (null == total) {
      total = Resources.createResource(0);
      toPreemptedFromOtherQueues.put(queue, partition, total);
    }
    Resources.addTo(total, allocated);
  }
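  /**
   * Selection flow: rebuild the priority digraph, collect reserved containers
   * whose queues outrank at least one other queue, then for each reservation
   * older than minTimeout try to preempt enough resources on its node from
   * lower-priority queues; if that fails and reservation moving is enabled,
   * try to re-reserve the container on a better node.
   */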
  @Override
  public Map<ApplicationAttemptId, Set<RMContainer>> selectCandidates(
      Map<ApplicationAttemptId, Set<RMContainer>> selectedCandidates,
      Resource clusterResource,
      Resource totalPreemptedResourceAllowed) {
    // Initialize digraph from queues
    // TODO (wangda): only do this when queue refreshed.
    priorityDigraph.clear();
    intializePriorityDigraph();
    // When all queues are set to the same priority, or priority is not
    // respected, return directly.
    if (priorityDigraph.isEmpty()) {
      return selectedCandidates;
    }
    // Save parameters to be shared by other methods
    this.selectedCandidates = selectedCandidates;
    this.clusterResource = clusterResource;
    this.totalPreemptionAllowed = totalPreemptedResourceAllowed;
    toPreemptedFromOtherQueues.clear();
    reservedContainers = new ArrayList<>();
    // Clear temp-scheduler-node-map every time when doing selection of
    // containers.
    tempSchedulerNodeMap.clear();
    touchedNodes = new HashSet<>();
    // Add all reserved containers for analysis
    List<FiCaSchedulerNode> allSchedulerNodes =
        preemptionContext.getScheduler().getAllNodes();
    for (FiCaSchedulerNode node : allSchedulerNodes) {
      RMContainer reservedContainer = node.getReservedContainer();
      if (null != reservedContainer) {
        // Add to the reservedContainers list if the queue that the reserved
        // container belongs to has higher priority than at least one queue
        if (priorityDigraph.containsRow(
            reservedContainer.getQueueName())) {
          reservedContainers.add(reservedContainer);
        }
      }
    }
    // Sort reserved container by creation time
    Collections.sort(reservedContainers, CONTAINER_CREATION_TIME_COMPARATOR);
    long currentTime = System.currentTimeMillis();
    // From the beginning of the list
    for (RMContainer reservedContainer : reservedContainers) {
      // Only try to preempt for a reserved container once it has existed for
      // at least minTimeout without being allocated
      if (currentTime - reservedContainer.getCreationTime() < minTimeout) {
        continue;
      }
      FiCaSchedulerNode node = preemptionContext.getScheduler().getNode(
          reservedContainer.getReservedNode());
      if (null == node) {
        // Something is wrong, ignore
        continue;
      }
      List<RMContainer> newlySelectedToBePreemptContainers = new ArrayList<>();
      // Check if we can preempt for this queue
      // We will skip if the demanding queue is already satisfied.
      String demandingQueueName = reservedContainer.getQueueName();
      boolean demandingQueueSatisfied = isQueueSatisfied(demandingQueueName,
          node.getPartition());
      // We will continue check if it is possible to preempt reserved container
      // from the node.
      boolean canPreempt = false;
      if (!demandingQueueSatisfied) {
        canPreempt = canPreemptEnoughResourceForAsked(
            reservedContainer.getReservedResource(), demandingQueueName, node,
            false, newlySelectedToBePreemptContainers);
      }
      // Add the selected containers if we can allocate the reserved container
      // by preempting others
      if (canPreempt) {
        touchedNodes.add(node.getNodeID());
        if (LOG.isDebugEnabled()) {
          LOG.debug("Trying to preempt following containers to make reserved "
              + "container=" + reservedContainer.getContainerId() + " on node="
              + node.getNodeID() + " can be allocated:");
        }
        // Update to-be-preempt
        incToPreempt(demandingQueueName, node.getPartition(),
            reservedContainer.getReservedResource());
        for (RMContainer c : newlySelectedToBePreemptContainers) {
          if (LOG.isDebugEnabled()) {
            LOG.debug(" --container=" + c.getContainerId() + " resource=" + c
                .getReservedResource());
          }
          Set<RMContainer> containers = selectedCandidates.get(
              c.getApplicationAttemptId());
          if (null == containers) {
            containers = new HashSet<>();
            selectedCandidates.put(c.getApplicationAttemptId(), containers);
          }
          containers.add(c);
          // Update totalPreemptionResourceAllowed
          Resources.subtractFrom(totalPreemptedResourceAllowed,
              c.getAllocatedResource());
        }
      } else if (!demandingQueueSatisfied) {
        // We failed to preempt enough resources to allocate the container.
        // This typically happens when the reserved node is not a good
        // candidate; try to see if we can reserve the container on a better host.
        // Only do this if the demanding queue is not satisfied.
        //
        // TODO (wangda): do more tests before making it usable
        //
        if (allowMoveReservation) {
          tryToMakeBetterReservationPlacement(reservedContainer,
              allSchedulerNodes);
        }
      }
    }
    return selectedCandidates;
  }
} 
| apache_hadoop | 2017-01-28 | 312b36d113d83640b92c62fdd91ede74bd04c00f |

| hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/zstd/TestZStandardCompressorDecompressor.java | 485 |
	/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 * http://www.apache.org/licenses/LICENSE-2.0
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.io.compress.zstd;
import org.apache.commons.io.FileUtils;
import org.apache.commons.io.IOUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.compress.CompressionInputStream;
import org.apache.hadoop.io.compress.CompressionOutputStream;
import org.apache.hadoop.io.compress.Compressor;
import org.apache.hadoop.io.compress.CompressorStream;
import org.apache.hadoop.io.compress.Decompressor;
import org.apache.hadoop.io.compress.DecompressorStream;
import org.apache.hadoop.io.compress.ZStandardCodec;
import org.apache.hadoop.test.MultithreadedTestUtil;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.Random;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT;
import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
import static org.junit.Assert.assertArrayEquals;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assume.assumeTrue;
public class TestZStandardCompressorDecompressor {
  private final static char[] HEX_ARRAY = "0123456789ABCDEF".toCharArray();
  private static final Random RANDOM = new Random(12345L);
  private static final Configuration CONFIGURATION = new Configuration();
  private static File compressedFile;
  private static File uncompressedFile;
  @BeforeClass
  public static void beforeClass() throws Exception {
    CONFIGURATION.setInt(IO_FILE_BUFFER_SIZE_KEY, 1024 * 64);
    uncompressedFile = new File(TestZStandardCompressorDecompressor.class
        .getResource("/zstd/test_file.txt").toURI());
    compressedFile = new File(TestZStandardCompressorDecompressor.class
        .getResource("/zstd/test_file.txt.zst").toURI());
  }
  @Before
  public void before() throws Exception {
    assumeTrue(ZStandardCodec.isNativeCodeLoaded());
  }
  @Test
  public void testCompressionCompressesCorrectly() throws Exception {
    int uncompressedSize = (int) FileUtils.sizeOf(uncompressedFile);
    byte[] bytes = FileUtils.readFileToByteArray(uncompressedFile);
    assertEquals(uncompressedSize, bytes.length);
    Configuration conf = new Configuration();
    ZStandardCodec codec = new ZStandardCodec();
    codec.setConf(conf);
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    Compressor compressor = codec.createCompressor();
    CompressionOutputStream outputStream =
        codec.createOutputStream(baos, compressor);
    for (byte aByte : bytes) {
      outputStream.write(aByte);
    }
    outputStream.finish();
    outputStream.close();
    assertEquals(uncompressedSize, compressor.getBytesRead());
    assertTrue(compressor.finished());
    // just make sure we can decompress the file
    ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
    ByteArrayInputStream bais = new ByteArrayInputStream(baos.toByteArray());
    Decompressor decompressor = codec.createDecompressor();
    CompressionInputStream inputStream =
        codec.createInputStream(bais, decompressor);
    byte[] buffer = new byte[100];
    int n = buffer.length;
    while ((n = inputStream.read(buffer, 0, n)) != -1) {
      byteArrayOutputStream.write(buffer, 0, n);
    }
    assertArrayEquals(bytes, byteArrayOutputStream.toByteArray());
  }
  @Test(expected = NullPointerException.class)
  public void testCompressorSetInputNullPointerException() {
    ZStandardCompressor compressor = new ZStandardCompressor();
    compressor.setInput(null, 0, 10);
  }
  //test on NullPointerException in {@code decompressor.setInput()}
  @Test(expected = NullPointerException.class)
  public void testDecompressorSetInputNullPointerException() {
    ZStandardDecompressor decompressor =
        new ZStandardDecompressor(IO_FILE_BUFFER_SIZE_DEFAULT);
    decompressor.setInput(null, 0, 10);
  }
  //test on ArrayIndexOutOfBoundsException in {@code compressor.setInput()}
  @Test(expected = ArrayIndexOutOfBoundsException.class)
  public void testCompressorSetInputAIOBException() {
    ZStandardCompressor compressor = new ZStandardCompressor();
    compressor.setInput(new byte[] {}, -5, 10);
  }
  //test on ArrayIndexOutOfBoundsException in {@code decompressor.setInput()}
  @Test(expected = ArrayIndexOutOfBoundsException.class)
  public void testDecompressorSetInputAIOUBException() {
    ZStandardDecompressor decompressor =
        new ZStandardDecompressor(IO_FILE_BUFFER_SIZE_DEFAULT);
    decompressor.setInput(new byte[] {}, -5, 10);
  }
  //test on NullPointerException in {@code compressor.compress()}
  @Test(expected = NullPointerException.class)
  public void testCompressorCompressNullPointerException() throws Exception {
    ZStandardCompressor compressor = new ZStandardCompressor();
    byte[] bytes = generate(1024 * 6);
    compressor.setInput(bytes, 0, bytes.length);
    compressor.compress(null, 0, 0);
  }
  //test on NullPointerException in {@code decompressor.decompress()}
  @Test(expected = NullPointerException.class)
  public void testDecompressorCompressNullPointerException() throws Exception {
    ZStandardDecompressor decompressor =
        new ZStandardDecompressor(IO_FILE_BUFFER_SIZE_DEFAULT);
    byte[] bytes = generate(1024 * 6);
    decompressor.setInput(bytes, 0, bytes.length);
    decompressor.decompress(null, 0, 0);
  }
  //test on ArrayIndexOutOfBoundsException in {@code compressor.compress()}
  @Test(expected = ArrayIndexOutOfBoundsException.class)
  public void testCompressorCompressAIOBException() throws Exception {
    ZStandardCompressor compressor = new ZStandardCompressor();
    byte[] bytes = generate(1024 * 6);
    compressor.setInput(bytes, 0, bytes.length);
    compressor.compress(new byte[] {}, 0, -1);
  }
  //test on ArrayIndexOutOfBoundsException in decompressor.decompress()
  @Test(expected = ArrayIndexOutOfBoundsException.class)
  public void testDecompressorCompressAIOBException() throws Exception {
    ZStandardDecompressor decompressor =
        new ZStandardDecompressor(IO_FILE_BUFFER_SIZE_DEFAULT);
    byte[] bytes = generate(1024 * 6);
    decompressor.setInput(bytes, 0, bytes.length);
    decompressor.decompress(new byte[] {}, 0, -1);
  }
  // test ZStandardCompressor compressor.compress()
  @Test
  public void testSetInputWithBytesSizeMoreThenDefaultZStandardBufferSize()
      throws Exception {
    int bytesSize = 1024 * 2056 + 1;
    ZStandardCompressor compressor = new ZStandardCompressor();
    byte[] bytes = generate(bytesSize);
    assertTrue("needsInput error !!!", compressor.needsInput());
    compressor.setInput(bytes, 0, bytes.length);
    byte[] emptyBytes = new byte[bytesSize];
    int cSize = compressor.compress(emptyBytes, 0, bytes.length);
    assertTrue(cSize > 0);
  }
  // test compress/decompress process through
  // CompressionOutputStream/CompressionInputStream api
  @Test
  public void testCompressorDecompressorLogicWithCompressionStreams()
      throws Exception {
    DataOutputStream deflateOut = null;
    DataInputStream inflateIn = null;
    int byteSize = 1024 * 100;
    byte[] bytes = generate(byteSize);
    int bufferSize = IO_FILE_BUFFER_SIZE_DEFAULT;
    try {
      DataOutputBuffer compressedDataBuffer = new DataOutputBuffer();
      CompressionOutputStream deflateFilter =
          new CompressorStream(compressedDataBuffer, new ZStandardCompressor(),
              bufferSize);
      deflateOut =
          new DataOutputStream(new BufferedOutputStream(deflateFilter));
      deflateOut.write(bytes, 0, bytes.length);
      deflateOut.flush();
      deflateFilter.finish();
      DataInputBuffer deCompressedDataBuffer = new DataInputBuffer();
      deCompressedDataBuffer.reset(compressedDataBuffer.getData(), 0,
          compressedDataBuffer.getLength());
      CompressionInputStream inflateFilter =
          new DecompressorStream(deCompressedDataBuffer,
              new ZStandardDecompressor(bufferSize), bufferSize);
      inflateIn = new DataInputStream(new BufferedInputStream(inflateFilter));
      byte[] result = new byte[byteSize];
      inflateIn.read(result);
      assertArrayEquals("original array not equals compress/decompressed array",
          result, bytes);
    } finally {
      IOUtils.closeQuietly(deflateOut);
      IOUtils.closeQuietly(inflateIn);
    }
  }
  @Test
  public void testZStandardCompressDecompressInMultiThreads() throws Exception {
    MultithreadedTestUtil.TestContext ctx =
        new MultithreadedTestUtil.TestContext();
    for (int i = 0; i < 10; i++) {
      ctx.addThread(new MultithreadedTestUtil.TestingThread(ctx) {
        @Override
        public void doWork() throws Exception {
          testCompressDecompress();
        }
      });
    }
    ctx.startThreads();
    ctx.waitFor(60000);
  }
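  // Round-trips a buffer of random data through the raw compressor and
  // decompressor and verifies the decompressed bytes match the original.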
  @Test
  public void testCompressDecompress() throws Exception {
    byte[] rawData;
    int rawDataSize;
    rawDataSize = IO_FILE_BUFFER_SIZE_DEFAULT;
    rawData = generate(rawDataSize);
    ZStandardCompressor compressor = new ZStandardCompressor();
    ZStandardDecompressor decompressor =
        new ZStandardDecompressor(IO_FILE_BUFFER_SIZE_DEFAULT);
    assertFalse(compressor.finished());
    compressor.setInput(rawData, 0, rawData.length);
    assertEquals(0, compressor.getBytesRead());
    compressor.finish();
    byte[] compressedResult = new byte[rawDataSize];
    int cSize = compressor.compress(compressedResult, 0, rawDataSize);
    assertEquals(rawDataSize, compressor.getBytesRead());
    assertTrue(cSize < rawDataSize);
    decompressor.setInput(compressedResult, 0, cSize);
    byte[] decompressedBytes = new byte[rawDataSize];
    decompressor.decompress(decompressedBytes, 0, decompressedBytes.length);
    assertEquals(bytesToHex(rawData), bytesToHex(decompressedBytes));
    compressor.reset();
    decompressor.reset();
  }
  @Test
  public void testCompressingWithOneByteOutputBuffer() throws Exception {
    int uncompressedSize = (int) FileUtils.sizeOf(uncompressedFile);
    byte[] bytes = FileUtils.readFileToByteArray(uncompressedFile);
    assertEquals(uncompressedSize, bytes.length);
    Configuration conf = new Configuration();
    ZStandardCodec codec = new ZStandardCodec();
    codec.setConf(conf);
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    Compressor compressor =
        new ZStandardCompressor(3, IO_FILE_BUFFER_SIZE_DEFAULT, 1);
    CompressionOutputStream outputStream =
        codec.createOutputStream(baos, compressor);
    for (byte aByte : bytes) {
      outputStream.write(aByte);
    }
    outputStream.finish();
    outputStream.close();
    assertEquals(uncompressedSize, compressor.getBytesRead());
    assertTrue(compressor.finished());
    // just make sure we can decompress the file
    ByteArrayOutputStream byteArrayOutputStream = new ByteArrayOutputStream();
    ByteArrayInputStream bais = new ByteArrayInputStream(baos.toByteArray());
    Decompressor decompressor = codec.createDecompressor();
    CompressionInputStream inputStream =
        codec.createInputStream(bais, decompressor);
    byte[] buffer = new byte[100];
    int n = buffer.length;
    while ((n = inputStream.read(buffer, 0, n)) != -1) {
      byteArrayOutputStream.write(buffer, 0, n);
    }
    assertArrayEquals(bytes, byteArrayOutputStream.toByteArray());
  }
  @Test
  public void testZStandardCompressDecompress() throws Exception {
    int rawDataSize = IO_FILE_BUFFER_SIZE_DEFAULT;
    byte[] rawData = generate(rawDataSize);
    ZStandardCompressor compressor = new ZStandardCompressor();
    ZStandardDecompressor decompressor = new ZStandardDecompressor(rawDataSize);
    assertTrue(compressor.needsInput());
    assertFalse("testZStandardCompressDecompress finished error",
        compressor.finished());
    compressor.setInput(rawData, 0, rawData.length);
    compressor.finish();
    byte[] compressedResult = new byte[rawDataSize];
    int cSize = compressor.compress(compressedResult, 0, rawDataSize);
    assertEquals(rawDataSize, compressor.getBytesRead());
    assertTrue("compressed size no less then original size",
        cSize < rawDataSize);
    decompressor.setInput(compressedResult, 0, cSize);
    byte[] decompressedBytes = new byte[rawDataSize];
    decompressor.decompress(decompressedBytes, 0, decompressedBytes.length);
    String decompressed = bytesToHex(decompressedBytes);
    String original = bytesToHex(rawData);
    assertEquals(original, decompressed);
    compressor.reset();
    decompressor.reset();
  }
  @Test
  public void testDecompressingOutput() throws Exception {
    byte[] expectedDecompressedResult =
        FileUtils.readFileToByteArray(uncompressedFile);
    ZStandardCodec codec = new ZStandardCodec();
    codec.setConf(CONFIGURATION);
    CompressionInputStream inputStream = codec
        .createInputStream(FileUtils.openInputStream(compressedFile),
            codec.createDecompressor());
    byte[] toDecompress = new byte[100];
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    byte[] decompressedResult;
    int totalFileSize = 0;
    int result = toDecompress.length;
    try {
      while ((result = inputStream.read(toDecompress, 0, result)) != -1) {
        baos.write(toDecompress, 0, result);
        totalFileSize += result;
      }
      decompressedResult = baos.toByteArray();
    } finally {
      IOUtils.closeQuietly(baos);
    }
    assertEquals(decompressedResult.length, totalFileSize);
    assertEquals(bytesToHex(expectedDecompressedResult),
        bytesToHex(decompressedResult));
  }
  @Test
  public void testZStandardDirectCompressDecompress() throws Exception {
    int[] size = {1, 4, 16, 4 * 1024, 64 * 1024, 128 * 1024, 1024 * 1024 };
    for (int aSize : size) {
      System.out.println("aSize = " + aSize);
      compressDecompressLoop(aSize);
    }
  }
  private void compressDecompressLoop(int rawDataSize) throws IOException {
    byte[] rawData = generate(rawDataSize);
    ByteArrayOutputStream baos = new ByteArrayOutputStream(rawDataSize + 12);
    CompressionOutputStream deflateFilter =
        new CompressorStream(baos, new ZStandardCompressor(), 4096);
    DataOutputStream deflateOut =
        new DataOutputStream(new BufferedOutputStream(deflateFilter));
    deflateOut.write(rawData, 0, rawData.length);
    deflateOut.flush();
    deflateFilter.finish();
    byte[] compressedResult = baos.toByteArray();
    int compressedSize = compressedResult.length;
    ZStandardDecompressor.ZStandardDirectDecompressor decompressor =
        new ZStandardDecompressor.ZStandardDirectDecompressor(4096);
    ByteBuffer inBuf = ByteBuffer.allocateDirect(compressedSize);
    ByteBuffer outBuf = ByteBuffer.allocateDirect(8096);
    inBuf.put(compressedResult, 0, compressedSize);
    inBuf.flip();
    ByteBuffer expected = ByteBuffer.wrap(rawData);
    outBuf.clear();
    while (!decompressor.finished()) {
      decompressor.decompress(inBuf, outBuf);
      if (outBuf.remaining() == 0) {
        outBuf.flip();
        while (outBuf.remaining() > 0) {
          assertEquals(expected.get(), outBuf.get());
        }
        outBuf.clear();
      }
    }
    outBuf.flip();
    while (outBuf.remaining() > 0) {
      assertEquals(expected.get(), outBuf.get());
    }
    outBuf.clear();
    assertEquals(0, expected.remaining());
  }
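  // Helper sketch (an addition, not in the original test): factors out the
  // "flip, compare against expected, clear" drain pattern that
  // compressDecompressLoop above repeats for its direct output buffer.
  private static void drainAndVerify(ByteBuffer outBuf, ByteBuffer expected) {
    outBuf.flip();                          // switch outBuf to read mode
    while (outBuf.remaining() > 0) {
      assertEquals(expected.get(), outBuf.get());
    }
    outBuf.clear();                         // ready for the next decompress()
  }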
  @Test
  public void testReadingWithAStream() throws Exception {
    FileInputStream inputStream = FileUtils.openInputStream(compressedFile);
    ZStandardCodec codec = new ZStandardCodec();
    codec.setConf(CONFIGURATION);
    Decompressor decompressor = codec.createDecompressor();
    CompressionInputStream cis =
        codec.createInputStream(inputStream, decompressor);
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    byte[] resultOfDecompression;
    try {
      byte[] buffer = new byte[100];
      int n;
      while ((n = cis.read(buffer, 0, buffer.length)) != -1) {
        baos.write(buffer, 0, n);
      }
      resultOfDecompression = baos.toByteArray();
    } finally {
      IOUtils.closeQuietly(baos);
      IOUtils.closeQuietly(cis);
    }
    byte[] expected = FileUtils.readFileToByteArray(uncompressedFile);
    assertEquals(bytesToHex(expected), bytesToHex(resultOfDecompression));
  }
  @Test
  public void testDecompressReturnsWhenNothingToDecompress() throws Exception {
    ZStandardDecompressor decompressor =
        new ZStandardDecompressor(IO_FILE_BUFFER_SIZE_DEFAULT);
    int result = decompressor.decompress(new byte[10], 0, 10);
    assertEquals(0, result);
  }
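  // Note: generate() below produces pseudo-random but low-entropy data (only
  // 16 distinct byte values), so the compression assertions above reliably
  // see the output shrink.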
  public static byte[] generate(int size) {
    byte[] data = new byte[size];
    for (int i = 0; i < size; i++) {
      data[i] = (byte) RANDOM.nextInt(16);
    }
    return data;
  }
  private static String bytesToHex(byte[] bytes) {
    char[] hexChars = new char[bytes.length * 2];
    for (int j = 0; j < bytes.length; j++) {
      int v = bytes[j] & 0xFF;
      hexChars[j * 2] = HEX_ARRAY[v >>> 4];
      hexChars[j * 2 + 1] = HEX_ARRAY[v & 0x0F];
    }
    return new String(hexChars);
  }
}
 
 | 
	apache_hadoop 
 | 
	2017-01-28 
 | 
	312b36d113d83640b92c62fdd91ede74bd04c00f 
 | 
					
	hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestFixKerberosTicketOrder.java 
 | 158 
							 | 
	/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.security;
import static org.apache.hadoop.test.LambdaTestUtils.intercept;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import java.io.File;
import java.security.PrivilegedExceptionAction;
import java.util.HashMap;
import java.util.Map;
import javax.security.auth.Subject;
import javax.security.auth.kerberos.KerberosTicket;
import javax.security.sasl.Sasl;
import javax.security.sasl.SaslClient;
import javax.security.sasl.SaslException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.minikdc.KerberosSecurityTestcase;
import org.apache.hadoop.security.SaslRpcServer.AuthMethod;
import org.apache.hadoop.security.SaslRpcServer.QualityOfProtection;
import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
import org.junit.Before;
import org.junit.Test;
/**
 * Testcase for HADOOP-13433 that verifies the logic of fixKerberosTicketOrder.
 */
public class TestFixKerberosTicketOrder extends KerberosSecurityTestcase {
  private String clientPrincipal = "client";
  private String server1Protocol = "server1";
  private String server2Protocol = "server2";
  private String host = "localhost";
  private String server1Principal = server1Protocol + "/" + host;
  private String server2Principal = server2Protocol + "/" + host;
  private File keytabFile;
  private Configuration conf = new Configuration();
  private Map<String, String> props;
  @Before
  public void setUp() throws Exception {
    keytabFile = new File(getWorkDir(), "keytab");
    getKdc().createPrincipal(keytabFile, clientPrincipal, server1Principal,
        server2Principal);
    SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS, conf);
    UserGroupInformation.setConfiguration(conf);
    UserGroupInformation.setShouldRenewImmediatelyForTests(true);
    props = new HashMap<String, String>();
    props.put(Sasl.QOP, QualityOfProtection.AUTHENTICATION.saslQop);
  }
  @Test
  public void test() throws Exception {
    UserGroupInformation ugi =
        UserGroupInformation.loginUserFromKeytabAndReturnUGI(clientPrincipal,
            keytabFile.getCanonicalPath());
    ugi.doAs(new PrivilegedExceptionAction<Void>() {
      @Override
      public Void run() throws Exception {
        SaslClient client = Sasl.createSaslClient(
            new String[] {AuthMethod.KERBEROS.getMechanismName()},
            clientPrincipal, server1Protocol, host, props, null);
        client.evaluateChallenge(new byte[0]);
        client.dispose();
        return null;
      }
    });
    Subject subject = ugi.getSubject();
    // move tgt to the last
    for (KerberosTicket ticket : subject
        .getPrivateCredentials(KerberosTicket.class)) {
      if (ticket.getServer().getName().startsWith("krbtgt")) {
        subject.getPrivateCredentials().remove(ticket);
        subject.getPrivateCredentials().add(ticket);
        break;
      }
    }
    // make sure the first ticket is not tgt
    assertFalse(
        "The first ticket is still tgt, "
            + "the implementation in jdk may have been changed, "
            + "please reconsider the problem in HADOOP-13433",
        subject.getPrivateCredentials().stream()
            .filter(c -> c instanceof KerberosTicket)
            .map(c -> ((KerberosTicket) c).getServer().getName()).findFirst()
            .get().startsWith("krbtgt"));
    // should fail as we send a service ticket instead of tgt to KDC.
    intercept(SaslException.class,
        () -> ugi.doAs(new PrivilegedExceptionAction<Void>() {
          @Override
          public Void run() throws Exception {
            SaslClient client = Sasl.createSaslClient(
                new String[] {AuthMethod.KERBEROS.getMechanismName()},
                clientPrincipal, server2Protocol, host, props, null);
            client.evaluateChallenge(new byte[0]);
            client.dispose();
            return null;
          }
        }));
    ugi.fixKerberosTicketOrder();
    // check if TGT is the first ticket after the fix.
    assertTrue("The first ticket is not tgt",
        subject.getPrivateCredentials().stream()
            .filter(c -> c instanceof KerberosTicket)
            .map(c -> ((KerberosTicket) c).getServer().getName()).findFirst()
            .get().startsWith("krbtgt"));
    // make sure we can still get new service ticket after the fix.
    ugi.doAs(new PrivilegedExceptionAction<Void>() {
      @Override
      public Void run() throws Exception {
        SaslClient client = Sasl.createSaslClient(
            new String[] {AuthMethod.KERBEROS.getMechanismName()},
            clientPrincipal, server2Protocol, host, props, null);
        client.evaluateChallenge(new byte[0]);
        client.dispose();
        return null;
      }
    });
    assertTrue("No service ticket for " + server2Protocol + " found",
        subject.getPrivateCredentials(KerberosTicket.class).stream()
            .filter(t -> t.getServer().getName().startsWith(server2Protocol))
            .findAny().isPresent());
  }
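  // A small helper sketch (an addition, not part of the HADOOP-13433 change)
  // that factors out the "is the first private credential a TGT?" check the
  // test above performs before and after fixKerberosTicketOrder().
  private static boolean firstTicketIsTgt(Subject subject) {
    return subject.getPrivateCredentials().stream()
        .filter(c -> c instanceof KerberosTicket)
        .map(c -> ((KerberosTicket) c).getServer().getName())
        .findFirst()
        .map(name -> name.startsWith("krbtgt"))
        .orElse(false);
  }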
} 
 | 
	apache_hadoop 
 | 
	2017-01-28 
 | 
	312b36d113d83640b92c62fdd91ede74bd04c00f 
 | 
					
	hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/policy/TestPriorityUtilizationQueueOrderingPolicy.java 
 | 222 
							 | 
	/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.policy;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.ImmutableTable;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CSQueue;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.QueueCapacities;
import org.junit.Assert;
import org.junit.Test;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
public class TestPriorityUtilizationQueueOrderingPolicy {
  private List<CSQueue> mockCSQueues(String[] queueNames, int[] priorities,
      float[] utilizations, String partition) {
    // sanity check
    assert queueNames != null && priorities != null && utilizations != null
        && queueNames.length > 0 && queueNames.length == priorities.length
        && priorities.length == utilizations.length;
    List<CSQueue> list = new ArrayList<>();
    for (int i = 0; i < queueNames.length; i++) {
      CSQueue q = mock(CSQueue.class);
      when(q.getQueueName()).thenReturn(queueNames[i]);
      QueueCapacities qc = new QueueCapacities(false);
      qc.setUsedCapacity(partition, utilizations[i]);
      when(q.getQueueCapacities()).thenReturn(qc);
      when(q.getPriority()).thenReturn(Priority.newInstance(priorities[i]));
      list.add(q);
    }
    return list;
  }
  private void verifyOrder(QueueOrderingPolicy orderingPolicy, String partition,
      String[] expectedOrder) {
    Iterator<CSQueue> iter = orderingPolicy.getAssignmentIterator(partition);
    int i = 0;
    while (iter.hasNext()) {
      CSQueue q = iter.next();
      Assert.assertEquals(expectedOrder[i], q.getQueueName());
      i++;
    }
    assert i == expectedOrder.length;
  }
  @Test
  public void testUtilizationOrdering() {
    PriorityUtilizationQueueOrderingPolicy policy =
        new PriorityUtilizationQueueOrderingPolicy(false);
    // Case 1, one queue
    policy.setQueues(mockCSQueues(new String[] { "a" }, new int[] { 0 },
        new float[] { 0.1f }, ""));
    verifyOrder(policy, "", new String[] { "a" });
    // Case 2, 2 queues
    policy.setQueues(mockCSQueues(new String[] { "a", "b" }, new int[] { 0, 0 },
        new float[] { 0.1f, 0.0f }, ""));
    verifyOrder(policy, "", new String[] { "b", "a" });
    // Case 3, 3 queues
    policy.setQueues(
        mockCSQueues(new String[] { "a", "b", "c" }, new int[] { 0, 0, 0 },
            new float[] { 0.1f, 0.0f, 0.2f }, ""));
    verifyOrder(policy, "", new String[] { "b", "a", "c" });
    // Case 4, 3 queues, ignore priority
    policy.setQueues(
        mockCSQueues(new String[] { "a", "b", "c" }, new int[] { 2, 1, 0 },
            new float[] { 0.1f, 0.0f, 0.2f }, ""));
    verifyOrder(policy, "", new String[] { "b", "a", "c" });
    // Case 5, 3 queues, look at partition (default)
    policy.setQueues(
        mockCSQueues(new String[] { "a", "b", "c" }, new int[] { 2, 1, 0 },
            new float[] { 0.1f, 0.0f, 0.2f }, "x"));
    verifyOrder(policy, "", new String[] { "a", "b", "c" });
    // Case 5, 3 queues, look at partition (x)
    policy.setQueues(
        mockCSQueues(new String[] { "a", "b", "c" }, new int[] { 2, 1, 0 },
            new float[] { 0.1f, 0.0f, 0.2f }, "x"));
    verifyOrder(policy, "x", new String[] { "b", "a", "c" });
    // Case 6, 3 queues, with different accessibility to partition
    List<CSQueue> queues = mockCSQueues(new String[] { "a", "b", "c" }, new int[] { 2, 1, 0 },
        new float[] { 0.1f, 0.0f, 0.2f }, "x");
    // a can access "x"
    when(queues.get(0).getAccessibleNodeLabels()).thenReturn(ImmutableSet.of("x", "y"));
    // c can access "x"
    when(queues.get(2).getAccessibleNodeLabels()).thenReturn(ImmutableSet.of("x", "y"));
    policy.setQueues(queues);
    verifyOrder(policy, "x", new String[] { "a", "c", "b" });
  }
  @Test
  public void testPriorityUtilizationOrdering() {
    PriorityUtilizationQueueOrderingPolicy policy =
        new PriorityUtilizationQueueOrderingPolicy(true);
    // Case 1, one queue
    policy.setQueues(mockCSQueues(new String[] { "a" }, new int[] { 1 },
        new float[] { 0.1f }, ""));
    verifyOrder(policy, "", new String[] { "a" });
    // Case 2, 2 queues, both under utilized, same priority
    policy.setQueues(mockCSQueues(new String[] { "a", "b" }, new int[] { 1, 1 },
        new float[] { 0.2f, 0.1f }, ""));
    verifyOrder(policy, "", new String[] { "b", "a" });
    // Case 3, 2 queues, both over utilized, same priority
    policy.setQueues(mockCSQueues(new String[] { "a", "b" }, new int[] { 1, 1 },
        new float[] { 1.1f, 1.2f }, ""));
    verifyOrder(policy, "", new String[] { "a", "b" });
    // Case 4, 2 queues, one under and one over, same priority
    policy.setQueues(mockCSQueues(new String[] { "a", "b" }, new int[] { 1, 1 },
        new float[] { 0.1f, 1.2f }, ""));
    verifyOrder(policy, "", new String[] { "a", "b" });
    // Case 5, 2 queues, both over utilized, different priority
    policy.setQueues(mockCSQueues(new String[] { "a", "b" }, new int[] { 1, 2 },
        new float[] { 1.1f, 1.2f }, ""));
    verifyOrder(policy, "", new String[] { "b", "a" });
    // Case 6, 2 queues, both under utilized, different priority
    policy.setQueues(mockCSQueues(new String[] { "a", "b" }, new int[] { 1, 2 },
        new float[] { 0.1f, 0.2f }, ""));
    verifyOrder(policy, "", new String[] { "b", "a" });
    // Case 7, 2 queues, one under utilized and one over utilized,
    // different priority (1)
    policy.setQueues(mockCSQueues(new String[] { "a", "b" }, new int[] { 1, 2 },
        new float[] { 0.1f, 1.2f }, ""));
    verifyOrder(policy, "", new String[] { "a", "b" });
    // Case 8, 2 queues, one under utilized and one over utilized,
    // different priority (1)
    policy.setQueues(mockCSQueues(new String[] { "a", "b" }, new int[] { 2, 1 },
        new float[] { 0.1f, 1.2f }, ""));
    verifyOrder(policy, "", new String[] { "a", "b" });
    // Case 9, 2 queues, one under utilized and one meet, different priority (1)
    policy.setQueues(mockCSQueues(new String[] { "a", "b" }, new int[] { 1, 2 },
        new float[] { 0.1f, 1.0f }, ""));
    verifyOrder(policy, "", new String[] { "a", "b" });
    // Case 10, 2 queues, one under utilized and one meet, different priority (2)
    policy.setQueues(mockCSQueues(new String[] { "a", "b" }, new int[] { 2, 1 },
        new float[] { 0.1f, 1.0f }, ""));
    verifyOrder(policy, "", new String[] { "a", "b" });
    // Case 11, 2 queues, one under utilized and one meet, same priority
    policy.setQueues(mockCSQueues(new String[] { "a", "b" }, new int[] { 1, 1 },
        new float[] { 0.1f, 1.0f }, ""));
    verifyOrder(policy, "", new String[] { "a", "b" });
    // Case 12, 2 queues, both meet, different priority
    policy.setQueues(mockCSQueues(new String[] { "a", "b" }, new int[] { 1, 2 },
        new float[] { 1.0f, 1.0f }, ""));
    verifyOrder(policy, "", new String[] { "b", "a" });
    // Case 13, 5 queues, different priority
    policy.setQueues(mockCSQueues(new String[] { "a", "b", "c", "d", "e" },
        new int[] { 1, 2, 0, 0, 3 },
        new float[] { 1.2f, 1.0f, 0.2f, 1.1f, 0.2f }, ""));
    verifyOrder(policy, "", new String[] { "e", "c", "b", "a", "d" });
    // Case 14, 5 queues, different priority, partition default;
    policy.setQueues(mockCSQueues(new String[] { "a", "b", "c", "d", "e" },
        new int[] { 1, 2, 0, 0, 3 },
        new float[] { 1.2f, 1.0f, 0.2f, 1.1f, 0.2f }, "x"));
    verifyOrder(policy, "", new String[] { "e", "b", "a", "c", "d" });
    // Case 15, 5 queues, different priority, partition x;
    policy.setQueues(mockCSQueues(new String[] { "a", "b", "c", "d", "e" },
        new int[] { 1, 2, 0, 0, 3 },
        new float[] { 1.2f, 1.0f, 0.2f, 1.1f, 0.2f }, "x"));
    verifyOrder(policy, "x", new String[] { "e", "c", "b", "a", "d" });
    // Case 16, 5 queues, different priority, partition x; and different
    // accessibility
    List<CSQueue> queues = mockCSQueues(new String[] { "a", "b", "c", "d", "e" },
        new int[] { 1, 2, 0, 0, 3 },
        new float[] { 1.2f, 1.0f, 0.2f, 1.1f, 0.2f }, "x");
    // Only a/d has access to x
    when(queues.get(0).getAccessibleNodeLabels()).thenReturn(
        ImmutableSet.of("x"));
    when(queues.get(3).getAccessibleNodeLabels()).thenReturn(
        ImmutableSet.of("x"));
    policy.setQueues(queues);
    verifyOrder(policy, "x", new String[] { "a", "d", "e", "c", "b" });
  }
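  // An extra sanity check (an addition, not in the original suite) exercising
  // the static compare() contract directly: with both queues under their
  // guaranteed capacity, the higher-priority queue should sort first, i.e.
  // compare() returns a positive value so q1 is placed after q2.
  @Test
  public void testStaticCompareForUnderUtilizedQueues() {
    Assert.assertTrue(PriorityUtilizationQueueOrderingPolicy
        .compare(0.1, 0.2, 1, 2) > 0);
  }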
}
 
 | 
	apache_hadoop 
 | 
	2017-01-28 
 | 
	312b36d113d83640b92c62fdd91ede74bd04c00f 
 | 
					
	hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestPreemptionForQueueWithPriorities.java 
 | 361 
							 | 
	/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity;
import org.junit.Before;
import org.junit.Test;
import java.io.IOException;
import static org.mockito.Matchers.argThat;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
public class TestPreemptionForQueueWithPriorities
    extends ProportionalCapacityPreemptionPolicyMockFramework {
  @Before
  public void setup() {
    super.setup();
    policy = new ProportionalCapacityPreemptionPolicy(rmContext, cs, mClock);
  }
  @Test
  public void testPreemptionForHighestPriorityUnderutilizedQueue()
      throws IOException {
    /**
     * The simplest test of queues with priorities. The queue structure is:
     *
     * <pre>
     *        root
     *       / |  \
     *      a  b   c
     * </pre>
     *
     * For priorities
     * - a=1
     * - b/c=2
     *
     * So c will preempt more resources from a, until a reaches its guaranteed
     * resource.
     */
    String labelsConfig = "=100,true"; // default partition
    String nodesConfig = "n1="; // only one node
    String queuesConfig =
        // guaranteed,max,used,pending
        "root(=[100 100 100 100]);" + //root
            "-a(=[30 100 40 50]){priority=1};" + // a
            "-b(=[30 100 59 50]){priority=2};" + // b
            "-c(=[40 100 1 25]){priority=2}";    // c
    String appsConfig =
        //queueName\t(priority,resource,host,expression,#repeat,reserved)
        "a\t(1,1,n1,,40,false);" + // app1 in a
        "b\t(1,1,n1,,59,false);" + // app2 in b
        "c\t(1,1,n1,,1,false);";   // app3 in c
    buildEnv(labelsConfig, nodesConfig, queuesConfig, appsConfig);
    policy.editSchedule();
    // 10 preempted from app1, 15 preempted from app2, and nothing preempted
    // from app3
    verify(mDisp, times(10)).handle(argThat(
        new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor(
            getAppAttemptId(1))));
    verify(mDisp, times(15)).handle(argThat(
        new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor(
            getAppAttemptId(2))));
    verify(mDisp, never()).handle(argThat(
        new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor(
            getAppAttemptId(3))));
  }
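  // How to read the config strings above (per the inline format comments):
  //   "-a(=[30 100 40 50]){priority=1}" -> queue a in the default partition
  //   with guaranteed=30, max=100, used=40, pending=50 and priority 1;
  //   "a\t(1,1,n1,,40,false)"           -> 40 containers of size 1 at request
  //   priority 1 on node n1, empty label expression, not reserved, for the
  //   app submitted to queue a.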
  @Test
  public void testPreemptionForLowestPriorityUnderutilizedQueue()
      throws IOException {
    /**
     * Similar to the test above, but verify that the less-utilized queue
     * still gets resources first regardless of priority.
     *
     * Queue structure is:
     *
     * <pre>
     *        root
     *       / |  \
     *      a  b   c
     * </pre>
     *
     * For priorities
     * - a=1
     * - b=2
     * - c=0
     *
     * So c will preempt more resources from a, until a reaches its guaranteed
     * resource.
     */
    String labelsConfig = "=100,true"; // default partition
    String nodesConfig = "n1="; // only one node
    String queuesConfig =
        // guaranteed,max,used,pending
        "root(=[100 100 100 100]);" + //root
            "-a(=[30 100 40 50]){priority=1};" + // a
            "-b(=[30 100 59 50]){priority=2};" + // b
            "-c(=[40 100 1 25]){priority=0}";    // c
    String appsConfig =
        //queueName\t(priority,resource,host,expression,#repeat,reserved)
            "a\t(1,1,n1,,40,false);" + // app1 in a
            "b\t(1,1,n1,,59,false);" + // app2 in b
            "c\t(1,1,n1,,1,false);";   // app3 in c
    buildEnv(labelsConfig, nodesConfig, queuesConfig, appsConfig);
    policy.editSchedule();
    // 10 preempted from app1, 15 preempted from app2, and nothing preempted
    // from app3
    verify(mDisp, times(10)).handle(argThat(
        new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor(
            getAppAttemptId(1))));
    verify(mDisp, times(15)).handle(argThat(
        new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor(
            getAppAttemptId(2))));
    verify(mDisp, never()).handle(argThat(
        new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor(
            getAppAttemptId(3))));
  }
  @Test
  public void testPreemptionWontHappenBetweenSatisfiedQueues()
      throws IOException {
    /**
     * No preemption happens if a queue is already satisfied, regardless of
     * priority
     *
     * Queue structure is:
     *
     * <pre>
     *        root
     *       / |  \
     *      a  b   c
     * </pre>
     *
     * For priorities
     * - a=1
     * - b=1
     * - c=2
     *
     * When c is satisfied, it will not preempt any resource from other queues
     */
    String labelsConfig = "=100,true"; // default partition
    String nodesConfig = "n1="; // only one node
    String queuesConfig =
        // guaranteed,max,used,pending
        "root(=[100 100 100 100]);" + //root
            "-a(=[30 100 0 0]){priority=1};" + // a
            "-b(=[30 100 40 50]){priority=1};" + // b
            "-c(=[40 100 60 25]){priority=2}";   // c
    String appsConfig =
        //queueName\t(priority,resource,host,expression,#repeat,reserved)
        "b\t(1,1,n1,,40,false);" + // app1 in b
        "c\t(1,1,n1,,60,false)"; // app2 in c
    buildEnv(labelsConfig, nodesConfig, queuesConfig, appsConfig);
    policy.editSchedule();
    // Nothing preempted
    verify(mDisp, never()).handle(argThat(
        new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor(
            getAppAttemptId(1))));
    verify(mDisp, never()).handle(argThat(
        new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor(
            getAppAttemptId(2))));
  }
  @Test
  public void testPreemptionForMultipleQueuesInTheSamePriorityBuckets()
      throws IOException {
    /**
     * When a cluster has queues with different priorities and each priority
     * level has multiple queues, the preemption policy should try to balance
     * resources between queues of the same priority in proportion to their
     * capacities.
     *
     * Queue structure is:
     *
     * <pre>
     * root
     * - a (capacity=10), p=1
     * - b (capacity=15), p=1
     * - c (capacity=20), p=2
     * - d (capacity=25), p=2
     * - e (capacity=30), p=2
     * </pre>
     */
    String labelsConfig = "=100,true"; // default partition
    String nodesConfig = "n1="; // only one node
    String queuesConfig =
        // guaranteed,max,used,pending
        "root(=[100 100 100 100]);" + //root
            "-a(=[10 100 35 50]){priority=1};" + // a
            "-b(=[15 100 25 50]){priority=1};" + // b
            "-c(=[20 100 39 50]){priority=2};" + // c
            "-d(=[25 100 0 0]){priority=2};" + // d
            "-e(=[30 100 1 99]){priority=2}";   // e
    String appsConfig =
        //queueName\t(priority,resource,host,expression,#repeat,reserved)
        "a\t(1,1,n1,,35,false);" + // app1 in a
        "b\t(1,1,n1,,25,false);" + // app2 in b
            "c\t(1,1,n1,,39,false);" + // app3 in c
            "e\t(1,1,n1,,1,false)"; // app4 in e
    buildEnv(labelsConfig, nodesConfig, queuesConfig, appsConfig);
    policy.editSchedule();
    // 23 preempted from app1, 6 preempted from app2, and nothing preempted
    // from app3/app4
    // (After preemption, a has 35 - 23 = 12 and b has 25 - 6 = 19, so b:a
    //  after preemption is 19/12 = 1.58, close to the guaranteed 15:10 = 1.50)
    verify(mDisp, times(23)).handle(argThat(
        new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor(
            getAppAttemptId(1))));
    verify(mDisp, times(6)).handle(argThat(
        new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor(
            getAppAttemptId(2))));
    verify(mDisp, never()).handle(argThat(
        new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor(
            getAppAttemptId(3))));
    verify(mDisp, never()).handle(argThat(
        new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor(
            getAppAttemptId(4))));
  }
  @Test
  public void testPreemptionForPriorityAndDisablePreemption()
      throws IOException {
    /**
     * When a cluster has queues with different priorities and each priority
     * level has multiple queues, the preemption policy should try to balance
     * resources between queues of the same priority in proportion to their
     * capacities.
     *
     * But we also need to make sure that disabling preemption is honored
     * regardless of priority.
     *
     * Queue structure is:
     *
     * <pre>
     * root
     * - a (capacity=10), p=1
     * - b (capacity=15), p=1
     * - c (capacity=20), p=2
     * - d (capacity=25), p=2
     * - e (capacity=30), p=2
     * </pre>
     */
    String labelsConfig = "=100,true"; // default partition
    String nodesConfig = "n1="; // only one node
    String queuesConfig =
        // guaranteed,max,used,pending
        "root(=[100 100 100 100]);" + //root
            "-a(=[10 100 35 50]){priority=1,disable_preemption=true};" + // a
            "-b(=[15 100 25 50]){priority=1};" + // b
            "-c(=[20 100 39 50]){priority=2};" + // c
            "-d(=[25 100 0 0]){priority=2};" + // d
            "-e(=[30 100 1 99]){priority=2}";   // e
    String appsConfig =
        //queueName\t(priority,resource,host,expression,#repeat,reserved)
        "a\t(1,1,n1,,35,false);" + // app1 in a
            "b\t(1,1,n1,,25,false);" + // app2 in b
            "c\t(1,1,n1,,39,false);" + // app3 in c
            "e\t(1,1,n1,,1,false)"; // app4 in e
    buildEnv(labelsConfig, nodesConfig, queuesConfig, appsConfig);
    policy.editSchedule();
    // We would normally preempt some resource from queue a, but since a
    // disables preemption, we preempt some resource from b and some from c
    // instead, even though c has a higher priority than a
    verify(mDisp, never()).handle(argThat(
        new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor(
            getAppAttemptId(1))));
    verify(mDisp, times(9)).handle(argThat(
        new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor(
            getAppAttemptId(2))));
    verify(mDisp, times(19)).handle(argThat(
        new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor(
            getAppAttemptId(3))));
    verify(mDisp, never()).handle(argThat(
        new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor(
            getAppAttemptId(4))));
  }
  @Test
  public void testPriorityPreemptionForHierarchicalOfQueues()
      throws IOException {
    /**
     * When queues form a multi-level hierarchy with different priorities:
     *
     * <pre>
     * root
     * - a (capacity=30), p=1
     *   - a1 (capacity=40), p=1
     *   - a2 (capacity=60), p=1
     * - b (capacity=30), p=1
     *   - b1 (capacity=50), p=1
     *   - b2 (capacity=50), p=2
     * - c (capacity=40), p=2
     * </pre>
     */
    String labelsConfig = "=100,true"; // default partition
    String nodesConfig = "n1="; // only one node
    String queuesConfig =
        // guaranteed,max,used,pending
        "root(=[100 100 100 100]);" + //root
            "-a(=[30 100 40 50]){priority=1};" + // a
            "--a1(=[12 100 20 50]){priority=1};" + // a1
            "--a2(=[18 100 20 50]){priority=1};" + // a2
            "-b(=[30 100 59 50]){priority=1};" + // b
            "--b1(=[15 100 30 50]){priority=1};" + // b1
            "--b2(=[15 100 29 50]){priority=2};" + // b2
            "-c(=[40 100 1 30]){priority=1}";   // c
    String appsConfig =
        //queueName\t(priority,resource,host,expression,#repeat,reserved)
        "a1\t(1,1,n1,,20,false);" + // app1 in a1
            "a2\t(1,1,n1,,20,false);" + // app2 in a2
            "b1\t(1,1,n1,,30,false);" + // app3 in b1
            "b2\t(1,1,n1,,29,false);" + // app4 in b2
            "c\t(1,1,n1,,29,false)"; // app5 in c
    buildEnv(labelsConfig, nodesConfig, queuesConfig, appsConfig);
    policy.editSchedule();
    // Preemption should first divide capacities between a / b, and b2 should
    // get less preemption than b1 (because b2 has higher priority)
    verify(mDisp, times(5)).handle(argThat(
        new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor(
            getAppAttemptId(1))));
    verify(mDisp, never()).handle(argThat(
        new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor(
            getAppAttemptId(2))));
    verify(mDisp, times(15)).handle(argThat(
        new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor(
            getAppAttemptId(3))));
    verify(mDisp, times(9)).handle(argThat(
        new TestProportionalCapacityPreemptionPolicy.IsPreemptionRequestFor(
            getAppAttemptId(4))));
  }
}
 
 | 
	apache_hadoop 
 | 
	2017-01-28 
 | 
	312b36d113d83640b92c62fdd91ede74bd04c00f 
 | 
					
	hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardDecompressor.c 
 | 218 
							 | 
	/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "org_apache_hadoop_io_compress_zstd.h"
#if defined HADOOP_ZSTD_LIBRARY
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef UNIX
#include <dlfcn.h>
#include "config.h"
#endif
#include "org_apache_hadoop_io_compress_zstd_ZStandardDecompressor.h"
static jfieldID ZStandardDecompressor_stream;
static jfieldID ZStandardDecompressor_compressedDirectBufOff;
static jfieldID ZStandardDecompressor_bytesInCompressedBuffer;
static jfieldID ZStandardDecompressor_directBufferSize;
static jfieldID ZStandardDecompressor_finished;
static jfieldID ZStandardDecompressor_remaining;
#ifdef UNIX
static size_t (*dlsym_ZSTD_DStreamOutSize)(void);
static size_t (*dlsym_ZSTD_DStreamInSize)(void);
static ZSTD_DStream* (*dlsym_ZSTD_createDStream)(void);
static size_t (*dlsym_ZSTD_initDStream)(ZSTD_DStream*);
static size_t (*dlsym_ZSTD_freeDStream)(ZSTD_DStream*);
static size_t (*dlsym_ZSTD_resetDStream)(ZSTD_DStream*);
static size_t (*dlsym_ZSTD_decompressStream)(ZSTD_DStream*, ZSTD_outBuffer*, ZSTD_inBuffer*);
static size_t (*dlsym_ZSTD_flushStream)(ZSTD_CStream*, ZSTD_outBuffer*);
static unsigned (*dlsym_ZSTD_isError)(size_t);
static const char * (*dlsym_ZSTD_getErrorName)(size_t);
#endif
#ifdef WINDOWS
typedef size_t (__cdecl *__dlsym_ZSTD_DStreamOutSize)(void);
typedef size_t (__cdecl *__dlsym_ZSTD_DStreamInSize)(void);
typedef ZSTD_DStream* (__cdecl *__dlsym_ZSTD_createDStream)(void);
typedef size_t (__cdecl *__dlsym_ZSTD_initDStream)(ZSTD_DStream*);
typedef size_t (__cdecl *__dlsym_ZSTD_freeDStream)(ZSTD_DStream*);
typedef size_t (__cdecl *__dlsym_ZSTD_resetDStream)(ZSTD_DStream*);
typedef size_t (__cdecl *__dlsym_ZSTD_decompressStream)(ZSTD_DStream*, ZSTD_outBuffer*, ZSTD_inBuffer*);
typedef size_t (__cdecl *__dlsym_ZSTD_flushStream)(ZSTD_CStream*, ZSTD_outBuffer*);
typedef unsigned (__cdecl *__dlsym_ZSTD_isError)(size_t);
typedef const char * (__cdecl *__dlsym_ZSTD_getErrorName)(size_t);
static __dlsym_ZSTD_DStreamOutSize dlsym_ZSTD_DStreamOutSize;
static __dlsym_ZSTD_DStreamInSize dlsym_ZSTD_DStreamInSize;
static __dlsym_ZSTD_createDStream dlsym_ZSTD_createDStream;
static __dlsym_ZSTD_initDStream dlsym_ZSTD_initDStream;
static __dlsym_ZSTD_freeDStream dlsym_ZSTD_freeDStream;
static __dlsym_ZSTD_resetDStream dlsym_ZSTD_resetDStream;
static __dlsym_ZSTD_decompressStream dlsym_ZSTD_decompressStream;
static __dlsym_ZSTD_isError dlsym_ZSTD_isError;
static __dlsym_ZSTD_getErrorName dlsym_ZSTD_getErrorName;
static __dlsym_ZSTD_flushStream dlsym_ZSTD_flushStream;
#endif
JNIEXPORT void JNICALL Java_org_apache_hadoop_io_compress_zstd_ZStandardDecompressor_initIDs (JNIEnv *env, jclass clazz) {
    // Load libzstd.so
#ifdef UNIX
    void *libzstd = dlopen(HADOOP_ZSTD_LIBRARY, RTLD_LAZY | RTLD_GLOBAL);
    if (!libzstd) {
        char* msg = (char*)malloc(1000);
        snprintf(msg, 1000, "%s (%s)!", "Cannot load " HADOOP_ZSTD_LIBRARY, dlerror());
        THROW(env, "java/lang/UnsatisfiedLinkError", msg);
        return;
    }
#endif
#ifdef WINDOWS
    HMODULE libzstd = LoadLibrary(HADOOP_ZSTD_LIBRARY);
    if (!libzstd) {
        THROW(env, "java/lang/UnsatisfiedLinkError", "Cannot load zstd.dll");
        return;
    }
#endif
#ifdef UNIX
    dlerror();
    LOAD_DYNAMIC_SYMBOL(dlsym_ZSTD_DStreamOutSize, env, libzstd, "ZSTD_DStreamOutSize");
    LOAD_DYNAMIC_SYMBOL(dlsym_ZSTD_DStreamInSize, env, libzstd, "ZSTD_DStreamInSize");
    LOAD_DYNAMIC_SYMBOL(dlsym_ZSTD_createDStream, env, libzstd, "ZSTD_createDStream");
    LOAD_DYNAMIC_SYMBOL(dlsym_ZSTD_initDStream, env, libzstd, "ZSTD_initDStream");
    LOAD_DYNAMIC_SYMBOL(dlsym_ZSTD_freeDStream, env, libzstd, "ZSTD_freeDStream");
    LOAD_DYNAMIC_SYMBOL(dlsym_ZSTD_resetDStream, env, libzstd, "ZSTD_resetDStream");
    LOAD_DYNAMIC_SYMBOL(dlsym_ZSTD_decompressStream, env, libzstd, "ZSTD_decompressStream");
    LOAD_DYNAMIC_SYMBOL(dlsym_ZSTD_isError, env, libzstd, "ZSTD_isError");
    LOAD_DYNAMIC_SYMBOL(dlsym_ZSTD_getErrorName, env, libzstd, "ZSTD_getErrorName");
    LOAD_DYNAMIC_SYMBOL(dlsym_ZSTD_flushStream, env, libzstd, "ZSTD_flushStream");
#endif
#ifdef WINDOWS
    LOAD_DYNAMIC_SYMBOL(__dlsym_ZSTD_DStreamOutSize, dlsym_ZSTD_DStreamOutSize, env, libzstd, "ZSTD_DStreamOutSize");
    LOAD_DYNAMIC_SYMBOL(__dlsym_ZSTD_DStreamInSize, dlsym_ZSTD_DStreamInSize, env, libzstd, "ZSTD_DStreamInSize");
    LOAD_DYNAMIC_SYMBOL(__dlsym_ZSTD_createDStream, dlsym_ZSTD_createDStream, env, libzstd, "ZSTD_createDStream");
    LOAD_DYNAMIC_SYMBOL(__dlsym_ZSTD_initDStream, dlsym_ZSTD_initDStream, env, libzstd, "ZSTD_initDStream");
    LOAD_DYNAMIC_SYMBOL(__dlsym_ZSTD_freeDStream, dlsym_ZSTD_freeDStream, env, libzstd, "ZSTD_freeDStream");
    LOAD_DYNAMIC_SYMBOL(__dlsym_ZSTD_resetDStream, dlsym_ZSTD_resetDStream, env, libzstd, "ZSTD_resetDStream");
    LOAD_DYNAMIC_SYMBOL(__dlsym_ZSTD_decompressStream, dlsym_ZSTD_decompressStream, env, libzstd, "ZSTD_decompressStream");
    LOAD_DYNAMIC_SYMBOL(__dlsym_ZSTD_isError, dlsym_ZSTD_isError, env, libzstd, "ZSTD_isError");
    LOAD_DYNAMIC_SYMBOL(__dlsym_ZSTD_getErrorName, dlsym_ZSTD_getErrorName, env, libzstd, "ZSTD_getErrorName");
    LOAD_DYNAMIC_SYMBOL(__dlsym_ZSTD_flushStream, dlsym_ZSTD_flushStream, env, libzstd, "ZSTD_flushStream");
#endif
    ZStandardDecompressor_stream = (*env)->GetFieldID(env, clazz, "stream", "J");
    ZStandardDecompressor_finished = (*env)->GetFieldID(env, clazz, "finished", "Z");
    ZStandardDecompressor_compressedDirectBufOff = (*env)->GetFieldID(env, clazz, "compressedDirectBufOff", "I");
    ZStandardDecompressor_bytesInCompressedBuffer = (*env)->GetFieldID(env, clazz, "bytesInCompressedBuffer", "I");
    ZStandardDecompressor_directBufferSize = (*env)->GetFieldID(env, clazz, "directBufferSize", "I");
    ZStandardDecompressor_remaining = (*env)->GetFieldID(env, clazz, "remaining", "I");
}
JNIEXPORT jlong JNICALL Java_org_apache_hadoop_io_compress_zstd_ZStandardDecompressor_create(JNIEnv *env, jobject this) {
    ZSTD_DStream * stream = dlsym_ZSTD_createDStream();
    if (stream == NULL) {
        THROW(env, "java/lang/InternalError", "Error creating stream");
        return (jlong) 0;
    }
    return (jlong) stream;
}
JNIEXPORT void JNICALL Java_org_apache_hadoop_io_compress_zstd_ZStandardDecompressor_init(JNIEnv *env, jobject this, jlong stream) {
    size_t result = dlsym_ZSTD_initDStream((ZSTD_DStream *) stream);
    if (dlsym_ZSTD_isError(result)) {
        THROW(env, "java/lang/InternalError", dlsym_ZSTD_getErrorName(result));
        return;
    }
    (*env)->SetIntField(env, this, ZStandardDecompressor_remaining, 0);
}
JNIEXPORT void JNICALL Java_org_apache_hadoop_io_compress_zstd_ZStandardDecompressor_free(JNIEnv *env, jclass obj, jlong stream) {
    size_t result = dlsym_ZSTD_freeDStream((ZSTD_DStream *) stream);
    if (dlsym_ZSTD_isError(result)) {
        THROW(env, "java/lang/InternalError", dlsym_ZSTD_getErrorName(result));
        return;
    }
}
JNIEXPORT jint JNICALL Java_org_apache_hadoop_io_compress_zstd_ZStandardDecompressor_inflateBytesDirect
(JNIEnv *env, jobject this, jobject compressed_direct_buf, jint compressed_direct_buf_off, jint compressed_direct_buf_len, jobject uncompressed_direct_buf, jint uncompressed_direct_buf_off, jint uncompressed_direct_buf_len) {
    ZSTD_DStream *stream = (ZSTD_DStream *) (*env)->GetLongField(env, this, ZStandardDecompressor_stream);
    if (!stream) {
        THROW(env, "java/lang/NullPointerException", NULL);
        return (jint)0;
    }
    // Get the input direct buffer
    void * compressed_bytes = (*env)->GetDirectBufferAddress(env, compressed_direct_buf);
    if (!compressed_bytes) {
        THROW(env, "java/lang/InternalError", "Undefined memory address for compressedDirectBuf");
        return (jint) 0;
    }
    // Get the output direct buffer
    void * uncompressed_bytes = (*env)->GetDirectBufferAddress(env, uncompressed_direct_buf);
    if (!uncompressed_bytes) {
        THROW(env, "java/lang/InternalError", "Undefined memory address for uncompressedDirectBuf");
        return (jint) 0;
    }
    uncompressed_bytes = ((char*) uncompressed_bytes) + uncompressed_direct_buf_off;
    ZSTD_inBuffer input = { compressed_bytes, compressed_direct_buf_len, compressed_direct_buf_off };
    ZSTD_outBuffer output = { uncompressed_bytes, uncompressed_direct_buf_len, 0 };
    size_t const size = dlsym_ZSTD_decompressStream(stream, &output, &input);
    // check for errors
    if (dlsym_ZSTD_isError(size)) {
        THROW(env, "java/lang/InternalError", dlsym_ZSTD_getErrorName(size));
        return (jint) 0;
    }
    int remaining = input.size - input.pos;
    (*env)->SetIntField(env, this, ZStandardDecompressor_remaining, remaining);
    // the entire frame has been decoded
    if (size == 0) {
        (*env)->SetBooleanField(env, this, ZStandardDecompressor_finished, JNI_TRUE);
        size_t result = dlsym_ZSTD_initDStream(stream);
        if (dlsym_ZSTD_isError(result)) {
            THROW(env, "java/lang/InternalError", dlsym_ZSTD_getErrorName(result));
            return (jint) 0;
        }
    }
    (*env)->SetIntField(env, this, ZStandardDecompressor_compressedDirectBufOff, input.pos);
    (*env)->SetIntField(env, this, ZStandardDecompressor_bytesInCompressedBuffer, input.size);
    return (jint) output.pos;
}
// returns the max size of the recommended input and output buffers
JNIEXPORT jint JNICALL Java_org_apache_hadoop_io_compress_zstd_ZStandardDecompressor_getStreamSize
(JNIEnv *env, jclass obj) {
    int x = (int) dlsym_ZSTD_DStreamInSize();
    int y = (int) dlsym_ZSTD_DStreamOutSize();
    return (x >= y) ? x : y;
}
#endif // defined HADOOP_ZSTD_LIBRARY 
 | 
	apache_hadoop 
 | 
	2017-01-28 
 | 
	312b36d113d83640b92c62fdd91ede74bd04c00f 
 | 
					
	hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/protocol/SlowPeerReports.java 
 | 107 
							 | 
	/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.server.protocol;
import com.google.common.collect.ImmutableMap;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import javax.annotation.Nonnull;
import javax.annotation.Nullable;
import java.util.Map;
/**
 * A class that allows a DataNode to communicate information about all
 * its peer DataNodes that appear to be slow.
 *
 * The wire representation of this structure is a list of
 * SlowPeerReportProto messages.
 */
@InterfaceAudience.Private
@InterfaceStability.Unstable
public final class SlowPeerReports {
  /**
   * A map from the DataNode's DataNodeUUID to its aggregate latency
   * as seen by the reporting node.
   *
   * The exact choice of the aggregate is opaque to the NameNode but it
   * should be chosen consistently by all DataNodes in the cluster.
   * Examples of aggregates are 90th percentile (good) and mean (not so
   * good).
   *
   * The NameNode must not attempt to interpret the aggregate latencies
   * beyond exposing them as a diagnostic, e.g. via metrics. Also, comparing
   * latencies across reports from different DataNodes may not be
   * meaningful and must be avoided.
   */
  @Nonnull
  private final Map<String, Double> slowPeers;
  /**
   * An object representing a SlowPeerReports with no entries. Should
   * be used instead of null or creating new objects when there are
   * no slow peers to report.
   */
  public static final SlowPeerReports EMPTY_REPORT =
      new SlowPeerReports(ImmutableMap.of());
  private SlowPeerReports(Map<String, Double> slowPeers) {
    this.slowPeers = slowPeers;
  }
  public static SlowPeerReports create(
      @Nullable Map<String, Double> slowPeers) {
    if (slowPeers == null || slowPeers.isEmpty()) {
      return EMPTY_REPORT;
    }
    return new SlowPeerReports(slowPeers);
  }
  public Map<String, Double> getSlowPeers() {
    return slowPeers;
  }
  public boolean haveSlowPeers() {
    return slowPeers.size() > 0;
  }
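  // Usage sketch (illustrative values, not part of the reporting protocol):
  //   Map<String, Double> latencies = new HashMap<>();
  //   latencies.put("datanode-uuid-1", 48.0); // e.g. a 90th-percentile aggregate
  //   SlowPeerReports reports = SlowPeerReports.create(latencies);
  //   assert reports.haveSlowPeers();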
  /**
   * Return true if the two objects represent the same set of slow peer
   * entries. Primarily for unit testing convenience.
   */
  @Override
  public boolean equals(Object o) {
    if (this == o) {
      return true;
    }
    if (!(o instanceof SlowPeerReports)) {
      return false;
    }
    SlowPeerReports that = (SlowPeerReports) o;
    return slowPeers.equals(that.slowPeers);
  }
  @Override
  public int hashCode() {
    return slowPeers.hashCode();
  }
}
 
 | 
	apache_hadoop 
 | 
	2017-01-28 
 | 
	312b36d113d83640b92c62fdd91ede74bd04c00f 
 | 
					
	hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/policy/PriorityUtilizationQueueOrderingPolicy.java 
 | 192 
							 | 
	/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.policy;
import com.google.common.annotations.VisibleForTesting;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CSQueue;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.Iterator;
import java.util.List;
import java.util.function.Supplier;
/**
 * For two queues with the same priority:
 * - The queue with less relative used-capacity goes first - today’s behavior.
 * - The default priority for all queues is 0 and equal. So, we get today’s
 *   behaviour at every level - the queue with the lowest used-capacity
 *   percentage gets the resources
 *
 * For two queues with different priorities:
 * - Both the queues are under their guaranteed capacities: The queue with
 *   the higher priority gets resources
 * - Both the queues are over or meeting their guaranteed capacities:
 *   The queue with the higher priority gets resources
 * - One of the queues is over or meeting their guaranteed capacities and the
 *   other is under: The queue that is under its capacity guarantee gets the
 *   resources.
 */
public class PriorityUtilizationQueueOrderingPolicy implements QueueOrderingPolicy {
  private List<CSQueue> queues;
  private boolean respectPriority;
  // This allows multiple threads to sort queues at the same time,
  // each for a different partition.
  private static ThreadLocal<String> partitionToLookAt =
      ThreadLocal.withInitial(new Supplier<String>() {
        @Override
        public String get() {
          return RMNodeLabelsManager.NO_LABEL;
        }
      });
  /**
   * Compare two queues with possibly different priorities and assigned
   * capacities. Will also be used by the preemption policy.
   *
   * @param relativeAssigned1 relativeAssigned1
   * @param relativeAssigned2 relativeAssigned2
   * @param priority1 p1
   * @param priority2 p2
   * @return compared result
   */
  public static int compare(double relativeAssigned1, double relativeAssigned2,
      int priority1, int priority2) {
    if (priority1 == priority2) {
      // The queue with less relative used-capacity goes first
      return Double.compare(relativeAssigned1, relativeAssigned2);
    } else {
      // When priority is different:
      if ((relativeAssigned1 < 1.0f && relativeAssigned2 < 1.0f) || (
          relativeAssigned1 >= 1.0f && relativeAssigned2 >= 1.0f)) {
        // When both queues are under their guaranteed capacities, or both
        // queues are over or meeting their guaranteed capacities, the
        // queue with the higher priority goes first
        return Integer.compare(priority2, priority1);
      } else {
        // Otherwise, when one of the queues is over or meeting their
        // guaranteed capacities and the other is under: The queue that is
        // under its capacity guarantee gets the resources.
        return Double.compare(relativeAssigned1, relativeAssigned2);
      }
    }
  }
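  // Worked examples of the rules above (values are illustrative; a negative
  // result means the first queue sorts ahead of the second):
  //   compare(0.5, 0.8, 1, 1) < 0 -> same priority, less-used queue first
  //   compare(0.5, 0.8, 1, 2) > 0 -> both under capacity, higher priority first
  //   compare(1.2, 1.5, 2, 1) < 0 -> both over capacity, higher priority first
  //   compare(0.5, 1.2, 1, 2) < 0 -> the under-capacity queue goes first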
  /**
   * Comparator that both looks at priority and utilization
   */
  private class PriorityQueueComparator implements  Comparator<CSQueue> {
    @Override
    public int compare(CSQueue q1, CSQueue q2) {
      String p = partitionToLookAt.get();
      int rc = compareQueueAccessToPartition(q1, q2, p);
      if (0 != rc) {
        return rc;
      }
      float used1 = q1.getQueueCapacities().getUsedCapacity(p);
      float used2 = q2.getQueueCapacities().getUsedCapacity(p);
      int p1 = 0;
      int p2 = 0;
      if (respectPriority) {
        p1 = q1.getPriority().getPriority();
        p2 = q2.getPriority().getPriority();
      }
      rc = PriorityUtilizationQueueOrderingPolicy.compare(used1, used2, p1, p2);
      // For queues with the same used ratio / priority, the queue with the
      // higher configured capacity goes first
      if (0 == rc) {
        float abs1 = q1.getQueueCapacities().getAbsoluteCapacity(p);
        float abs2 = q2.getQueueCapacities().getAbsoluteCapacity(p);
        return Float.compare(abs2, abs1);
      }
      return rc;
    }
    private int compareQueueAccessToPartition(CSQueue q1, CSQueue q2, String partition) {
      // Everybody has access to default partition
      if (StringUtils.equals(partition, RMNodeLabelsManager.NO_LABEL)) {
        return 0;
      }
      /*
       * Check accessibility to the given partition; if one queue is
       * accessible and the other is not, the accessible queue goes first.
       */
      // Parenthesize so that the null check guards the whole expression;
      // otherwise a queue with null accessible labels would NPE on the
      // ANY lookup.
      boolean q1Accessible = q1.getAccessibleNodeLabels() != null
          && (q1.getAccessibleNodeLabels().contains(partition)
              || q1.getAccessibleNodeLabels().contains(RMNodeLabelsManager.ANY));
      boolean q2Accessible = q2.getAccessibleNodeLabels() != null
          && (q2.getAccessibleNodeLabels().contains(partition)
              || q2.getAccessibleNodeLabels().contains(RMNodeLabelsManager.ANY));
      if (q1Accessible && !q2Accessible) {
        return -1;
      } else if (!q1Accessible && q2Accessible) {
        return 1;
      }
      return 0;
    }
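    // For a non-default partition "x" (illustrative):
    //   q1 accessible, q2 not accessible -> -1 (q1 sorts first)
    //   q1 not accessible, q2 accessible ->  1 (q2 sorts first)
    //   both or neither accessible       ->  0 (fall through to priority /
    //                                           utilization comparison)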
  }
  public PriorityUtilizationQueueOrderingPolicy(boolean respectPriority) {
    this.respectPriority = respectPriority;
  }
  @Override
  public void setQueues(List<CSQueue> queues) {
    this.queues = queues;
  }
  @Override
  public Iterator<CSQueue> getAssignmentIterator(String partition) {
    // partitionToLookAt is a thread-local variable and we sort a fresh copy of
    // the queue list on every call, so this is safe in a multi-threaded
    // environment.
    PriorityUtilizationQueueOrderingPolicy.partitionToLookAt.set(partition);
    List<CSQueue> sortedQueue = new ArrayList<>(queues);
    Collections.sort(sortedQueue, new PriorityQueueComparator());
    return sortedQueue.iterator();
  }
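  // Illustrative (hypothetical) usage of getAssignmentIterator by a parent
  // queue when assigning containers:
  //   PriorityUtilizationQueueOrderingPolicy policy =
  //       new PriorityUtilizationQueueOrderingPolicy(true);
  //   policy.setQueues(childQueues);
  //   Iterator<CSQueue> it =
  //       policy.getAssignmentIterator(RMNodeLabelsManager.NO_LABEL);
  // Each call re-sorts a copy of the queue list for the requested partition.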
  @Override
  public String getConfigName() {
    if (respectPriority) {
      return CapacitySchedulerConfiguration.QUEUE_PRIORITY_UTILIZATION_ORDERING_POLICY;
    } else {
      return CapacitySchedulerConfiguration.QUEUE_UTILIZATION_ORDERING_POLICY;
    }
  }
  @VisibleForTesting
  public List<CSQueue> getQueues() {
    return queues;
  }
}
 
 | 
	apache_hadoop 
 | 
	2017-01-28 
 | 
	312b36d113d83640b92c62fdd91ede74bd04c00f 
 | 
					
	hadoop-hdfs-project/hadoop-hdfs-client/src/test/java/org/apache/hadoop/hdfs/protocol/TestBlockType.java 
 | 61 
							 | 
	/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.protocol;
import org.junit.Test;
import static org.apache.hadoop.hdfs.protocol.BlockType.CONTIGUOUS;
import static org.apache.hadoop.hdfs.protocol.BlockType.STRIPED;
import static org.junit.Assert.*;
/**
 * Test the BlockType class.
 */
public class TestBlockType {
  @Test
  public void testGetBlockType() throws Exception {
    assertEquals(BlockType.fromBlockId(0x0000000000000000L), CONTIGUOUS);
    assertEquals(BlockType.fromBlockId(0x1000000000000000L), CONTIGUOUS);
    assertEquals(BlockType.fromBlockId(0x2000000000000000L), CONTIGUOUS);
    assertEquals(BlockType.fromBlockId(0x4000000000000000L), CONTIGUOUS);
    assertEquals(BlockType.fromBlockId(0x7000000000000000L), CONTIGUOUS);
    assertEquals(BlockType.fromBlockId(0x00000000ffffffffL), CONTIGUOUS);
    assertEquals(BlockType.fromBlockId(0x10000000ffffffffL), CONTIGUOUS);
    assertEquals(BlockType.fromBlockId(0x20000000ffffffffL), CONTIGUOUS);
    assertEquals(BlockType.fromBlockId(0x40000000ffffffffL), CONTIGUOUS);
    assertEquals(BlockType.fromBlockId(0x70000000ffffffffL), CONTIGUOUS);
    assertEquals(BlockType.fromBlockId(0x0fffffffffffffffL), CONTIGUOUS);
    assertEquals(BlockType.fromBlockId(0x1fffffffffffffffL), CONTIGUOUS);
    assertEquals(BlockType.fromBlockId(0x2fffffffffffffffL), CONTIGUOUS);
    assertEquals(BlockType.fromBlockId(0x4fffffffffffffffL), CONTIGUOUS);
    assertEquals(BlockType.fromBlockId(0x7fffffffffffffffL), CONTIGUOUS);
    assertEquals(BlockType.fromBlockId(0x8000000000000000L), STRIPED);
    assertEquals(BlockType.fromBlockId(0x9000000000000000L), STRIPED);
    assertEquals(BlockType.fromBlockId(0xa000000000000000L), STRIPED);
    assertEquals(BlockType.fromBlockId(0xf000000000000000L), STRIPED);
    assertEquals(BlockType.fromBlockId(0x80000000ffffffffL), STRIPED);
    assertEquals(BlockType.fromBlockId(0x90000000ffffffffL), STRIPED);
    assertEquals(BlockType.fromBlockId(0xa0000000ffffffffL), STRIPED);
    assertEquals(BlockType.fromBlockId(0xf0000000ffffffffL), STRIPED);
    assertEquals(BlockType.fromBlockId(0x8fffffffffffffffL), STRIPED);
    assertEquals(BlockType.fromBlockId(0x9fffffffffffffffL), STRIPED);
    assertEquals(BlockType.fromBlockId(0xafffffffffffffffL), STRIPED);
    assertEquals(BlockType.fromBlockId(0xffffffffffffffffL), STRIPED);
  }
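  // Summary of the cases above: the block type appears to be keyed off the
  // most significant bit of the 64-bit block ID -- IDs with the MSB clear map
  // to CONTIGUOUS and IDs with the MSB set map to STRIPED.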
}
 
 | 
	apache_hadoop 
 | 
	2017-01-28 
 | 
	312b36d113d83640b92c62fdd91ede74bd04c00f 
 | 
					
	hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/metrics/TestSlowNodeDetector.java 
 | 335 
							 | 
	/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.server.datanode.metrics;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import org.apache.commons.lang3.tuple.Pair;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.log4j.Level;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.Timeout;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Set;
import static org.junit.Assert.assertTrue;
/**
 * Unit tests for {@link SlowNodeDetector}.
 */
public class TestSlowNodeDetector {
  public static final Logger LOG =
      LoggerFactory.getLogger(TestSlowNodeDetector.class);
  /**
   * Set a timeout for every test case.
   */
  @Rule
  public Timeout testTimeout = new Timeout(300_000);
  private final static double LOW_THRESHOLD = 1000;
  private final static long MIN_OUTLIER_DETECTION_PEERS = 3;
  // Randomly generated test cases for median and MAD. The first entry
  // in each pair is the expected median and the second entry is the
  // expected Median Absolute Deviation. The small sets of size 1 and 2
  // exist to test the edge cases; however, in practice the MAD of a very
  // small set is not useful.
  private Map<List<Double>, Pair<Double, Double>> medianTestMatrix =
      new ImmutableMap.Builder<List<Double>, Pair<Double, Double>>()
          // Single element.
          .put(new ImmutableList.Builder<Double>()
                  .add(9.6502431302).build(),
              Pair.of(9.6502431302, 0.0))
          // Two elements.
          .put(new ImmutableList.Builder<Double>()
                  .add(1.72168104625)
                  .add(11.7872544459).build(),
              Pair.of(6.75446774606, 7.4616095611))
          // The remaining lists were randomly generated with sizes 3-10.
          .put(new ImmutableList.Builder<Double>()
                  .add(76.2635686249)
                  .add(27.0652018553)
                  .add(1.3868476443)
                  .add(49.7194624164)
                  .add(47.385680883)
                  .add(57.8721199173).build(),
              Pair.of(48.5525716497, 22.837202532))
          .put(new ImmutableList.Builder<Double>()
                  .add(86.0573389581)
                  .add(93.2399572424)
                  .add(64.9545429122)
                  .add(35.8509730085)
                  .add(1.6534313654).build(),
              Pair.of(64.9545429122, 41.9360180373))
          .put(new ImmutableList.Builder<Double>()
                  .add(5.00127007366)
                  .add(37.9790589127)
                  .add(67.5784746266).build(),
              Pair.of(37.9790589127, 43.8841594039))
          .put(new ImmutableList.Builder<Double>()
                  .add(1.43442932944)
                  .add(70.6769829947)
                  .add(37.47579656)
                  .add(51.1126141394)
                  .add(72.2465914419)
                  .add(32.2930549225)
                  .add(39.677459781).build(),
              Pair.of(39.677459781, 16.9537852208))
          .put(new ImmutableList.Builder<Double>()
                  .add(26.7913745214)
                  .add(68.9833706658)
                  .add(29.3882180746)
                  .add(68.3455244453)
                  .add(74.9277265022)
                  .add(12.1469972942)
                  .add(72.5395402683)
                  .add(7.87917492506)
                  .add(33.3253447774)
                  .add(72.2753759125).build(),
              Pair.of(50.8354346113, 31.9881230079))
          .put(new ImmutableList.Builder<Double>()
                  .add(38.6482290705)
                  .add(88.0690746319)
                  .add(50.6673611649)
                  .add(64.5329814115)
                  .add(25.2580979294)
                  .add(59.6709630711)
                  .add(71.5406993741)
                  .add(81.3073035091)
                  .add(20.5549547284).build(),
              Pair.of(59.6709630711, 31.1683520683))
          .put(new ImmutableList.Builder<Double>()
                  .add(87.352734249)
                  .add(65.4760359094)
                  .add(28.9206803169)
                  .add(36.5908574008)
                  .add(87.7407653175)
                  .add(99.3704511335)
                  .add(41.3227434076)
                  .add(46.2713494909)
                  .add(3.49940920921).build(),
              Pair.of(46.2713494909, 28.4729106898))
          .put(new ImmutableList.Builder<Double>()
                  .add(95.3251533286)
                  .add(27.2777870437)
                  .add(43.73477168).build(),
              Pair.of(43.73477168, 24.3991619317))
          .build();
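  // Worked example for the two-element case above (assuming computeMad applies
  // the usual 1.4826 consistency constant, which matches the expected values):
  //   median = (1.72168104625 + 11.7872544459) / 2 ~= 6.75446774606
  //   absolute deviations from the median = {5.0327866998, 5.0327866998}
  //   MAD = 1.4826 * 5.0327866998 ~= 7.4616095611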
  // A test matrix that maps inputs to the expected output list of
  // slow nodes i.e. outliers.
  private Map<Map<String, Double>, Set<String>> outlierTestMatrix =
      new ImmutableMap.Builder<Map<String, Double>, Set<String>>()
          // The number of samples is too low and all samples are below
          // the low threshold. Nothing should be returned.
          .put(ImmutableMap.of(
              "n1", 0.0,
              "n2", LOW_THRESHOLD + 1),
              ImmutableSet.of())
          // A statistical outlier below the low threshold must not be
          // returned.
          .put(ImmutableMap.of(
              "n1", 1.0,
              "n2", 1.0,
              "n3", LOW_THRESHOLD - 1),
              ImmutableSet.of())
          // A statistical outlier above the low threshold must be returned.
          .put(ImmutableMap.of(
              "n1", 1.0,
              "n2", 1.0,
              "n3", LOW_THRESHOLD + 1),
              ImmutableSet.of("n3"))
          // A statistical outlier must not be returned if it is within a
          // MEDIAN_MULTIPLIER multiple of the median.
          .put(ImmutableMap.of(
              "n1", LOW_THRESHOLD + 0.1,
              "n2", LOW_THRESHOLD + 0.1,
              "n3", LOW_THRESHOLD * SlowNodeDetector.MEDIAN_MULTIPLIER - 0.1),
              ImmutableSet.of())
          // A statistical outlier must be returned if it is outside a
          // MEDIAN_MULTIPLIER multiple of the median.
          .put(ImmutableMap.of(
              "n1", LOW_THRESHOLD + 0.1,
              "n2", LOW_THRESHOLD + 0.1,
              "n3", (LOW_THRESHOLD + 0.1) *
                  SlowNodeDetector.MEDIAN_MULTIPLIER + 0.1),
              ImmutableSet.of("n3"))
          // Only the statistical outliers n3 and n11 should be returned.
          .put(new ImmutableMap.Builder<String, Double>()
                  .put("n1", 1029.4322)
                  .put("n2", 2647.876)
                  .put("n3", 9194.312)
                  .put("n4", 2.2)
                  .put("n5", 2012.92)
                  .put("n6", 1843.81)
                  .put("n7", 1201.43)
                  .put("n8", 6712.01)
                  .put("n9", 3278.554)
                  .put("n10", 2091.765)
                  .put("n11", 9194.77).build(),
              ImmutableSet.of("n3", "n11"))
          // The following input set has multiple outliers.
          //   - The low outliers (n4, n6) should not be returned.
          //   - High outlier n2 is within 3 multiples of the median
          //     and so it should not be returned.
          //   - Only the high outlier n8 should be returned.
          .put(new ImmutableMap.Builder<String, Double>()
                  .put("n1", 5002.0)
                  .put("n2", 9001.0)
                  .put("n3", 5004.0)
                  .put("n4", 1001.0)
                  .put("n5", 5003.0)
                  .put("n6", 2001.0)
                  .put("n7", 5000.0)
                  .put("n8", 101002.0)
                  .put("n9", 5001.0)
                  .put("n10", 5002.0)
                  .put("n11", 5105.0)
                  .put("n12", 5006.0).build(),
              ImmutableSet.of("n8"))
          .build();
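  // Taken together, the cases above imply that a node is reported as slow only
  // when (a) there are at least MIN_OUTLIER_DETECTION_PEERS reporting peers,
  // (b) its latency is above LOW_THRESHOLD, (c) it exceeds MEDIAN_MULTIPLIER
  // times the median, and (d) it is a statistical outlier relative to the
  // other samples. This is inferred from the expected outputs above, not
  // asserted directly here.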
  private SlowNodeDetector slowNodeDetector;
  @Before
  public void setup() {
    slowNodeDetector = new SlowNodeDetector((long) LOW_THRESHOLD);
    SlowNodeDetector.setMinOutlierDetectionPeers(MIN_OUTLIER_DETECTION_PEERS);
    GenericTestUtils.setLogLevel(SlowNodeDetector.LOG, Level.ALL);
  }
  @Test
  public void testOutliersFromTestMatrix() {
    for (Map.Entry<Map<String, Double>, Set<String>> entry :
        outlierTestMatrix.entrySet()) {
      LOG.info("Verifying set {}", entry.getKey());
      final Set<String> outliers =
          slowNodeDetector.getOutliers(entry.getKey()).keySet();
      assertTrue(
          "Running outlier detection on " + entry.getKey() +
              " was expected to yield set " + entry.getValue() + ", but " +
              " we got set " + outliers,
          outliers.equals(entry.getValue()));
    }
  }
  /**
   * Unit test for {@link SlowNodeDetector#computeMedian(List)}.
   */
  @Test
  public void testMediansFromTestMatrix() {
    for (Map.Entry<List<Double>, Pair<Double, Double>> entry :
        medianTestMatrix.entrySet()) {
      final List<Double> inputList = new ArrayList<>(entry.getKey());
      Collections.sort(inputList);
      final Double median = SlowNodeDetector.computeMedian(inputList);
      final Double expectedMedian = entry.getValue().getLeft();
      // Ensure that the median is within 0.001% of expected.
      // We need some fudge factor for floating point comparison.
      final Double errorPercent =
          Math.abs(median - expectedMedian) * 100.0 / expectedMedian;
      assertTrue(
          "Set " + inputList + "; Expected median: " +
              expectedMedian + ", got: " + median,
          errorPercent < 0.001);
    }
  }
  /**
   * Unit test for {@link SlowNodeDetector#computeMad(List)}.
   */
  @Test
  public void testMadsFromTestMatrix() {
    for (Map.Entry<List<Double>, Pair<Double, Double>> entry :
        medianTestMatrix.entrySet()) {
      final List<Double> inputList = new ArrayList<>(entry.getKey());
      Collections.sort(inputList);
      final Double mad = SlowNodeDetector.computeMad(inputList);
      final Double expectedMad = entry.getValue().getRight();
      // Ensure that the MAD is within 0.001% of expected.
      // We need some fudge factor for floating point comparison.
      if (entry.getKey().size() > 1) {
        final Double errorPercent =
            Math.abs(mad - expectedMad) * 100.0 / expectedMad;
        assertTrue(
            "Set " + entry.getKey() + "; Expected M.A.D.: " +
                expectedMad + ", got: " + mad,
            errorPercent < 0.001);
      } else {
        // For an input list of size 1, the MAD should be 0.0.
        final Double epsilon = 0.000001; // Allow for some FP math error.
        assertTrue(
            "Set " + entry.getKey() + "; Expected M.A.D.: " +
                expectedMad + ", got: " + mad,
            mad < epsilon);
      }
    }
  }
  /**
   * Verify that {@link SlowNodeDetector#computeMedian(List)} throws when
   * passed an empty list.
   */
  @Test(expected=IllegalArgumentException.class)
  public void testMedianOfEmptyList() {
    SlowNodeDetector.computeMedian(Collections.emptyList());
  }
  /**
   * Verify that {@link SlowNodeDetector#computeMad(List)} throws when
   * passed an empty list.
   */
  @Test(expected=IllegalArgumentException.class)
  public void testMadOfEmptyList() {
    SlowNodeDetector.computeMad(Collections.emptyList());
  }
}
 
 | 
	apache_hadoop 
 | 
	2017-01-28 
 | 
	312b36d113d83640b92c62fdd91ede74bd04c00f 
 | 
					
	hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/AppPriorityACLGroup.java 
 | 108 
							 | 
	/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
import org.apache.hadoop.security.authorize.AccessControlList;
import org.apache.hadoop.yarn.api.records.Priority;
/**
 * AppPriorityACLGroup holds all ACL-related information per priority.
 */
public class AppPriorityACLGroup implements Comparable<AppPriorityACLGroup> {
  private Priority maxPriority = null;
  private Priority defaultPriority = null;
  private AccessControlList aclList = null;
  public AppPriorityACLGroup(Priority maxPriority, Priority defaultPriority,
      AccessControlList aclList) {
    this.setMaxPriority(Priority.newInstance(maxPriority.getPriority()));
    this.setDefaultPriority(
        Priority.newInstance(defaultPriority.getPriority()));
    this.setACLList(aclList);
  }
  public AppPriorityACLGroup() {
  }
  @Override
  public int compareTo(AppPriorityACLGroup o) {
    return getMaxPriority().compareTo(o.getMaxPriority());
  }
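  // Example: AppPriorityACLGroup instances compare by their maxPriority
  // (delegating to Priority#compareTo), so sorting a list of groups orders
  // them by maximum priority rather than by default priority or ACL contents.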
  @Override
  public boolean equals(Object obj) {
    if (this == obj) {
      return true;
    }
    if (obj == null) {
      return false;
    }
    if (getClass() != obj.getClass()) {
      return false;
    }
    AppPriorityACLGroup other = (AppPriorityACLGroup) obj;
    // Compare priorities by value rather than by reference.
    if (!getMaxPriority().equals(other.getMaxPriority())) {
      return false;
    }
    if (!getDefaultPriority().equals(other.getDefaultPriority())) {
      return false;
    }
    return true;
  }
  @Override
  public int hashCode() {
    final int prime = 517861;
    int result = 9511;
    result = prime * result + getMaxPriority().getPriority();
    result = prime * result + getDefaultPriority().getPriority();
    return result;
  }
  public Priority getMaxPriority() {
    return maxPriority;
  }
  public Priority getDefaultPriority() {
    return defaultPriority;
  }
  public AccessControlList getACLList() {
    return aclList;
  }
  public void setMaxPriority(Priority maxPriority) {
    this.maxPriority = maxPriority;
  }
  public void setDefaultPriority(Priority defaultPriority) {
    this.defaultPriority = defaultPriority;
  }
  public void setACLList(AccessControlList accessControlList) {
    this.aclList = accessControlList;
  }
}
 
 | 
	apache_hadoop 
 | 
	2017-01-28 
 | 
	312b36d113d83640b92c62fdd91ede74bd04c00f 
 | 
					
	hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationPriorityACLs.java 
 | 206 
							 | 
	/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements.  See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership.  The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License.  You may obtain a copy of the License at
*
*     http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.yarn.api.ApplicationClientProtocol;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportResponse;
import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationRequest;
import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.server.resourcemanager.ACLsTestBase;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
import org.apache.hadoop.yarn.server.utils.BuilderUtils;
import org.junit.Assert;
import org.junit.Test;
public class TestApplicationPriorityACLs extends ACLsTestBase {
  private final int defaultPriorityQueueA = 3;
  private final int defaultPriorityQueueB = 10;
  private final int maxPriorityQueueA = 5;
  private final int maxPriorityQueueB = 11;
  private final int clusterMaxPriority = 10;
  @Test
  public void testApplicationACLs() throws Exception {
    /*
     * Cluster Max-priority is 10. User 'queueA_user' has permission to submit
     * apps only at priority 5. Default priority for this user is 3.
     */
    // Case 1: App will be submitted with priority 5.
    verifyAppSubmitWithPrioritySuccess(QUEUE_A_USER, QUEUEA, 5);
    // Case 2: App will be rejected as submitted priority was 6.
    verifyAppSubmitWithPriorityFailure(QUEUE_A_USER, QUEUEA, 6);
    // Case 3: App will be submitted without a priority, so the user's default
    // priority (3) is used.
    verifyAppSubmitWithPrioritySuccess(QUEUE_A_USER, QUEUEA, -1);
    // Case 4: App will be submitted with priority 11.
    verifyAppSubmitWithPrioritySuccess(QUEUE_B_USER, QUEUEB, 11);
  }
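  // Note on Case 4: queueB's maximum priority (11) exceeds the cluster maximum
  // (10), so the RM is expected to clamp the effective application priority to
  // 10; verifyAppPriorityIsAccepted below accounts for this.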
  private void verifyAppSubmitWithPrioritySuccess(String submitter,
      String queueName, int priority) throws Exception {
    Priority appPriority = null;
    if (priority > 0) {
      appPriority = Priority.newInstance(priority);
    } else {
      // RM will consider default priority for the submitted user. So update
      // priority to the default value to compare.
      priority = defaultPriorityQueueA;
    }
    ApplicationSubmissionContext submissionContext = prepareForAppSubmission(
        submitter, queueName, appPriority);
    submitAppToRMWithValidAcl(submitter, submissionContext);
    // Ideally get app report here and check the priority.
    verifyAppPriorityIsAccepted(submitter, submissionContext.getApplicationId(),
        priority);
  }
  private void verifyAppSubmitWithPriorityFailure(String submitter,
      String queueName, int priority) throws Exception {
    Priority appPriority = Priority.newInstance(priority);
    ApplicationSubmissionContext submissionContext = prepareForAppSubmission(
        submitter, queueName, appPriority);
    submitAppToRMWithInValidAcl(submitter, submissionContext);
  }
  private ApplicationSubmissionContext prepareForAppSubmission(String submitter,
      String queueName, Priority priority) throws Exception {
    GetNewApplicationRequest newAppRequest = GetNewApplicationRequest
        .newInstance();
    ApplicationClientProtocol submitterClient = getRMClientForUser(submitter);
    ApplicationId applicationId = submitterClient
        .getNewApplication(newAppRequest).getApplicationId();
    Resource resource = BuilderUtils.newResource(1024, 1);
    ContainerLaunchContext amContainerSpec = ContainerLaunchContext
        .newInstance(null, null, null, null, null, null);
    ApplicationSubmissionContext appSubmissionContext = ApplicationSubmissionContext
        .newInstance(applicationId, "applicationName", queueName, null,
            amContainerSpec, false, true, 1, resource, "applicationType");
    appSubmissionContext.setApplicationId(applicationId);
    appSubmissionContext.setQueue(queueName);
    if (null != priority) {
      appSubmissionContext.setPriority(priority);
    }
    return appSubmissionContext;
  }
  private void submitAppToRMWithValidAcl(String submitter,
      ApplicationSubmissionContext appSubmissionContext)
      throws YarnException, IOException, InterruptedException {
    ApplicationClientProtocol submitterClient = getRMClientForUser(submitter);
    SubmitApplicationRequest submitRequest = SubmitApplicationRequest
        .newInstance(appSubmissionContext);
    submitterClient.submitApplication(submitRequest);
    resourceManager.waitForState(appSubmissionContext.getApplicationId(),
        RMAppState.ACCEPTED);
  }
  private void submitAppToRMWithInValidAcl(String submitter,
      ApplicationSubmissionContext appSubmissionContext)
      throws YarnException, IOException, InterruptedException {
    ApplicationClientProtocol submitterClient = getRMClientForUser(submitter);
    SubmitApplicationRequest submitRequest = SubmitApplicationRequest
        .newInstance(appSubmissionContext);
    try {
      submitterClient.submitApplication(submitRequest);
      Assert.fail("Application submission should have failed for a priority"
          + " above the queue's ACL limit.");
    } catch (YarnException ex) {
      Assert.assertTrue(ex.getCause() instanceof RemoteException);
    }
  }
  private void verifyAppPriorityIsAccepted(String submitter,
      ApplicationId applicationId, int priority)
      throws IOException, InterruptedException {
    ApplicationClientProtocol submitterClient = getRMClientForUser(submitter);
    /**
     * If the requested priority is greater than the cluster max, the RM will
     * automatically reset it to the cluster max. Treat this scenario as a
     * special case.
     */
    if (priority > clusterMaxPriority) {
      priority = clusterMaxPriority;
    }
    GetApplicationReportRequest request = GetApplicationReportRequest
        .newInstance(applicationId);
    try {
      GetApplicationReportResponse response = submitterClient
          .getApplicationReport(request);
      Assert.assertEquals(response.getApplicationReport().getPriority(),
          Priority.newInstance(priority));
    } catch (YarnException e) {
      Assert.fail("Application submission should not fail.");
    }
  }
  @Override
  protected Configuration createConfiguration() {
    CapacitySchedulerConfiguration csConf = new CapacitySchedulerConfiguration();
    csConf.setQueues(CapacitySchedulerConfiguration.ROOT,
        new String[]{QUEUEA, QUEUEB, QUEUEC});
    csConf.setCapacity(CapacitySchedulerConfiguration.ROOT + "." + QUEUEA, 50f);
    csConf.setCapacity(CapacitySchedulerConfiguration.ROOT + "." + QUEUEB, 25f);
    csConf.setCapacity(CapacitySchedulerConfiguration.ROOT + "." + QUEUEC, 25f);
    String[] aclsForA = new String[2];
    aclsForA[0] = QUEUE_A_USER;
    aclsForA[1] = QUEUE_A_GROUP;
    csConf.setPriorityAcls(CapacitySchedulerConfiguration.ROOT + "." + QUEUEA,
        Priority.newInstance(maxPriorityQueueA),
        Priority.newInstance(defaultPriorityQueueA), aclsForA);
    String[] aclsForB = new String[2];
    aclsForB[0] = QUEUE_B_USER;
    aclsForB[1] = QUEUE_B_GROUP;
    csConf.setPriorityAcls(CapacitySchedulerConfiguration.ROOT + "." + QUEUEB,
        Priority.newInstance(maxPriorityQueueB),
        Priority.newInstance(defaultPriorityQueueB), aclsForB);
    csConf.setBoolean(YarnConfiguration.YARN_ACL_ENABLE, true);
    csConf.set(YarnConfiguration.RM_SCHEDULER,
        CapacityScheduler.class.getName());
    return csConf;
  }
}
 
 | 
	apache_hadoop 
 | 
	2017-01-28 
 | 
	312b36d113d83640b92c62fdd91ede74bd04c00f 
 | 
					
	hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestUnsetAndChangeDirectoryEcPolicy.java 
 | 366 
							 | 
	/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.namenode.ErasureCodingPolicyManager;
import org.apache.hadoop.io.erasurecode.CodecUtil;
import org.apache.hadoop.io.erasurecode.ErasureCodeNative;
import org.apache.hadoop.io.erasurecode.rawcoder.NativeRSRawErasureCoderFactory;
import org.junit.After;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.Timeout;
import org.junit.Assert;
import java.io.FileNotFoundException;
import java.io.IOException;
import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains;
import static org.junit.Assert.fail;
/**
 * Test unset and change directory's erasure coding policy.
 */
public class TestUnsetAndChangeDirectoryEcPolicy {
  public static final Log LOG =
      LogFactory.getLog(TestUnsetAndChangeDirectoryEcPolicy.class);
  private MiniDFSCluster cluster;
  private Configuration conf = new Configuration();
  private DistributedFileSystem fs;
  private ErasureCodingPolicy ecPolicy = ErasureCodingPolicyManager
      .getSystemDefaultPolicy();
  private final short dataBlocks = (short) ecPolicy.getNumDataUnits();
  private final short parityBlocks = (short) ecPolicy.getNumParityUnits();
  private final int cellSize = ecPolicy.getCellSize();
  private final int stripsPerBlock = 2;
  private final int blockSize = stripsPerBlock * cellSize;
  private final int blockGroupSize = dataBlocks * blockSize;
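  // For reference, with the system default RS-6-3 policy these work out to
  // blockSize = 2 * cellSize and blockGroupSize = 6 * blockSize = 12 * cellSize.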
  @Rule
  public Timeout globalTimeout = new Timeout(300000);
  @Before
  public void setup() throws IOException {
    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY, 0);
    if (ErasureCodeNative.isNativeCodeLoaded()) {
      conf.set(
          CodecUtil.IO_ERASURECODE_CODEC_RS_DEFAULT_RAWCODER_KEY,
          NativeRSRawErasureCoderFactory.class.getCanonicalName());
    }
    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(
        dataBlocks + parityBlocks).build();
    cluster.waitActive();
    fs = cluster.getFileSystem();
  }
  @After
  public void tearDown() {
    if (cluster != null) {
      cluster.shutdown();
      cluster = null;
    }
  }
  /*
   * Test unset EC policy on directory.
   */
  @Test
  public void testUnsetEcPolicy() throws Exception {
    final int numBlocks = 1;
    final int fileLen = blockGroupSize * numBlocks;
    final Path dirPath = new Path("/striped");
    final Path ecFilePath = new Path(dirPath, "ec_file");
    final Path replicateFilePath = new Path(dirPath, "3x_file");
    fs.mkdirs(dirPath);
    // Test unset a directory which has no EC policy
    fs.unsetErasureCodingPolicy(dirPath);
    // Set EC policy on directory
    fs.setErasureCodingPolicy(dirPath, ecPolicy);
    DFSTestUtil.createFile(fs, ecFilePath, fileLen, (short) 1, 0L);
    fs.unsetErasureCodingPolicy(dirPath);
    DFSTestUtil.createFile(fs, replicateFilePath, fileLen, (short) 1, 0L);
    // ec_file should have an EC policy
    ErasureCodingPolicy tempEcPolicy =
        fs.getErasureCodingPolicy(ecFilePath);
    Assert.assertTrue("Erasure coding policy mismatch!",
        tempEcPolicy.getName().equals(ecPolicy.getName()));
    // 3x_file (replicated) should not have an EC policy
    tempEcPolicy = fs.getErasureCodingPolicy(replicateFilePath);
    Assert.assertNull("Replicate file should not have erasure coding policy!",
        tempEcPolicy);
    // Directory should not return erasure coding policy
    tempEcPolicy = fs.getErasureCodingPolicy(dirPath);
    Assert.assertNull("Directory should no have erasure coding policy set!",
        tempEcPolicy);
    fs.delete(dirPath, true);
  }
  /*
  * Test nested directory with different EC policy.
  */
  @Test
  public void testNestedEcPolicy() throws Exception {
    final int numBlocks = 1;
    final int fileLen = blockGroupSize * numBlocks;
    final Path parentDir = new Path("/ec-6-3");
    final Path childDir = new Path("/ec-6-3/ec-3-2");
    final Path ec63FilePath = new Path(childDir, "ec_6_3_file");
    final Path ec32FilePath = new Path(childDir, "ec_3_2_file");
    final Path ec63FilePath2 = new Path(childDir, "ec_6_3_file_2");
    final ErasureCodingPolicy ec32Policy = ErasureCodingPolicyManager
        .getPolicyByPolicyID(HdfsConstants.RS_3_2_POLICY_ID);
    fs.mkdirs(parentDir);
    fs.setErasureCodingPolicy(parentDir, ecPolicy);
    fs.mkdirs(childDir);
    // Create RS(6,3) EC policy file
    DFSTestUtil.createFile(fs, ec63FilePath, fileLen, (short) 1, 0L);
    // Set RS(3,2) EC policy on child directory
    fs.setErasureCodingPolicy(childDir, ec32Policy);
    // Create RS(3,2) EC policy file
    DFSTestUtil.createFile(fs, ec32FilePath, fileLen, (short) 1, 0L);
    // Start to check
    // ec_6_3_file should have the RS-6-3 EC policy
    ErasureCodingPolicy tempEcPolicy =
        fs.getErasureCodingPolicy(ec63FilePath);
    Assert.assertTrue("Erasure coding policy mismatch!",
        tempEcPolicy.getName().equals(ecPolicy.getName()));
    // ec_3_2_file should have RS-3-2 policy
    tempEcPolicy = fs.getErasureCodingPolicy(ec32FilePath);
    Assert.assertTrue("Erasure coding policy mismatch!",
        tempEcPolicy.getName().equals(ec32Policy.getName()));
    // Child directory should have RS-3-2 policy
    tempEcPolicy = fs.getErasureCodingPolicy(childDir);
    Assert.assertTrue(
        "Directory should have erasure coding policy set!",
        tempEcPolicy.getName().equals(ec32Policy.getName()));
    // Unset EC policy on child directory
    fs.unsetErasureCodingPolicy(childDir);
    DFSTestUtil.createFile(fs, ec63FilePath2, fileLen, (short) 1, 0L);
    // ec_6_3_file_2 should have RS-6-3 policy
    tempEcPolicy = fs.getErasureCodingPolicy(ec63FilePath2);
    Assert.assertTrue("Erasure coding policy mismatch!",
        tempEcPolicy.getName().equals(ecPolicy.getName()));
    // Child directory should have RS-6-3 policy now
    tempEcPolicy = fs.getErasureCodingPolicy(childDir);
    Assert.assertTrue(
        "Directory should have erasure coding policy set!",
        tempEcPolicy.getName().equals(ecPolicy.getName()));
    fs.delete(parentDir, true);
  }
  /*
   * Test unset EC policy on root directory.
   */
  @Test
  public void testUnsetRootDirEcPolicy() throws Exception {
    final int numBlocks = 1;
    final int fileLen = blockGroupSize * numBlocks;
    final Path rootPath = new Path("/");
    final Path ecFilePath = new Path(rootPath, "ec_file");
    final Path replicateFilePath = new Path(rootPath, "rep_file");
    // Test unset root path which has no EC policy
    fs.unsetErasureCodingPolicy(rootPath);
    // Set EC policy on root path
    fs.setErasureCodingPolicy(rootPath, ecPolicy);
    DFSTestUtil.createFile(fs, ecFilePath, fileLen, (short) 1, 0L);
    fs.unsetErasureCodingPolicy(rootPath);
    DFSTestUtil.createFile(fs, replicateFilePath, fileLen, (short) 1, 0L);
    // ec_file should have the EC policy set
    ErasureCodingPolicy tempEcPolicy =
        fs.getErasureCodingPolicy(ecFilePath);
    Assert.assertTrue("Erasure coding policy mismatch!",
        tempEcPolicy.getName().equals(ecPolicy.getName()));
    // rep_file should not have EC policy set
    tempEcPolicy = fs.getErasureCodingPolicy(replicateFilePath);
    Assert.assertNull("Replicate file should not have erasure coding policy!",
        tempEcPolicy);
    // Directory should not return erasure coding policy
    tempEcPolicy = fs.getErasureCodingPolicy(rootPath);
    Assert.assertNull("Directory should not have erasure coding policy set!",
        tempEcPolicy);
    fs.delete(rootPath, true);
  }
  /*
  * Test change EC policy on root directory.
  */
  @Test
  public void testChangeRootDirEcPolicy() throws Exception {
    final int numBlocks = 1;
    final int fileLen = blockGroupSize * numBlocks;
    final Path rootPath = new Path("/");
    final Path ec63FilePath = new Path(rootPath, "ec_6_3_file");
    final Path ec32FilePath = new Path(rootPath, "ec_3_2_file");
    final ErasureCodingPolicy ec32Policy = ErasureCodingPolicyManager
        .getPolicyByPolicyID(HdfsConstants.RS_3_2_POLICY_ID);
    fs.unsetErasureCodingPolicy(rootPath);
    fs.setErasureCodingPolicy(rootPath, ecPolicy);
    // Create RS(6,3) EC policy file
    DFSTestUtil.createFile(fs, ec63FilePath, fileLen, (short) 1, 0L);
    // Change EC policy from RS(6,3) to RS(3,2)
    fs.setErasureCodingPolicy(rootPath, ec32Policy);
    DFSTestUtil.createFile(fs, ec32FilePath, fileLen, (short) 1, 0L);
    // start to check
    // ec_6_3_file should have the RS-6-3 EC policy set
    ErasureCodingPolicy tempEcPolicy =
        fs.getErasureCodingPolicy(ec63FilePath);
    Assert.assertTrue("Erasure coding policy mismatch!",
        tempEcPolicy.getName().equals(ecPolicy.getName()));
    // ec_3_2_file should have RS-3-2 policy
    tempEcPolicy = fs.getErasureCodingPolicy(ec32FilePath);
    Assert.assertTrue("Erasure coding policy mismatch!",
        tempEcPolicy.getName().equals(ec32Policy.getName()));
    // Root directory should have RS-3-2 policy
    tempEcPolicy = fs.getErasureCodingPolicy(rootPath);
    Assert.assertTrue(
        "Directory should have erasure coding policy!",
        tempEcPolicy.getName().equals(ec32Policy.getName()));
    fs.delete(rootPath, true);
  }
  /*
   * Test files with different replication factors.
   */
  @Test
  public void testDifferentReplicaFactor() throws Exception {
    final int numBlocks = 1;
    final int fileLen = blockGroupSize * numBlocks;
    final Path ecDirPath = new Path("/striped");
    final Path ecFilePath = new Path(ecDirPath, "ec_file");
    final Path replicateFilePath = new Path(ecDirPath, "rep_file");
    final Path replicateFilePath2 = new Path(ecDirPath, "rep_file2");
    fs.mkdirs(ecDirPath);
    fs.setErasureCodingPolicy(ecDirPath, ecPolicy);
    DFSTestUtil.createFile(fs, ecFilePath, fileLen, (short) 1, 0L);
    fs.unsetErasureCodingPolicy(ecDirPath);
    DFSTestUtil.createFile(fs, replicateFilePath, fileLen, (short) 3, 0L);
    DFSTestUtil.createFile(fs, replicateFilePath2, fileLen, (short) 2, 0L);
    // ec_file should have the EC policy set
    ErasureCodingPolicy tempEcPolicy =
        fs.getErasureCodingPolicy(ecFilePath);
    Assert.assertTrue("Erasure coding policy mismatch!",
        tempEcPolicy.getName().equals(ecPolicy.getName()));
    // rep_file should not have EC policy set
    tempEcPolicy = fs.getErasureCodingPolicy(replicateFilePath);
    Assert.assertNull("Replicate file should not have erasure coding policy!",
        tempEcPolicy);
    tempEcPolicy = fs.getErasureCodingPolicy(replicateFilePath2);
    Assert.assertNull("Replicate file should not have erasure coding policy!",
        tempEcPolicy);
    // Directory should not return erasure coding policy
    tempEcPolicy = fs.getErasureCodingPolicy(ecDirPath);
    Assert.assertNull("Directory should not have erasure coding policy set!",
        tempEcPolicy);
    fs.delete(ecDirPath, true);
  }
  /*
   * Test setting and unsetting an EC policy on a directory that doesn't exist.
   */
  @Test
  public void testNonExistentDir() throws Exception {
    final Path dirPath = new Path("/striped");
    // Unset EC policy on non-existent directory
    try {
      fs.unsetErasureCodingPolicy(dirPath);
      fail("FileNotFoundException should be thrown for a non-existent"
          + " file path");
    } catch (FileNotFoundException e) {
      assertExceptionContains("Path not found: " + dirPath, e);
    }
    // Set EC policy on non-existent directory
    try {
      fs.setErasureCodingPolicy(dirPath, ecPolicy);
      fail("FileNotFoundException should be thrown for a non-existent"
          + " file path");
    } catch (FileNotFoundException e) {
      assertExceptionContains("Path not found: " + dirPath, e);
    }
  }
  /*
   * Test set and unset EC policy on file.
   */
  @Test
  public void testEcPolicyOnFile() throws Exception {
    final Path ecFilePath = new Path("/striped_file");
    final int fileLen = blockGroupSize * 2;
    DFSTestUtil.createFile(fs, ecFilePath, fileLen, (short) 1, 0L);
    // Set EC policy on file
    try {
      fs.setErasureCodingPolicy(ecFilePath, ecPolicy);
      fail("IOException should be thrown for setting EC policy on file");
    } catch (IOException e) {
      assertExceptionContains("Attempt to set an erasure coding policy " +
          "for a file " + ecFilePath, e);
    }
    // Unset EC policy on file
    try {
      fs.unsetErasureCodingPolicy(ecFilePath);
      fail("IOException should be thrown for unsetting EC policy on file");
    } catch (IOException e) {
      assertExceptionContains("Cannot unset an erasure coding policy on a file "
          + ecFilePath, e);
    }
  }
} 
 | 
	apache_hadoop 
 | 
	2017-01-28 
 | 
	312b36d113d83640b92c62fdd91ede74bd04c00f 
 | 
					
	hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zstd/ZStandardDecompressor.java 
 | 323 
							 | 
	/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.io.compress.zstd;
import org.apache.hadoop.io.compress.Decompressor;
import org.apache.hadoop.io.compress.DirectDecompressor;
import org.apache.hadoop.util.NativeCodeLoader;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.nio.ByteBuffer;
/**
 * A {@link Decompressor} based on the zStandard compression algorithm.
 * https://github.com/facebook/zstd
 */
public class ZStandardDecompressor implements Decompressor {
  private static final Logger LOG =
      LoggerFactory.getLogger(ZStandardDecompressor.class);
  private long stream;
  private int directBufferSize;
  private ByteBuffer compressedDirectBuf = null;
  private int compressedDirectBufOff, bytesInCompressedBuffer;
  private ByteBuffer uncompressedDirectBuf = null;
  private byte[] userBuf = null;
  private int userBufOff = 0, userBufferBytesToConsume = 0;
  private boolean finished;
  private int remaining = 0;
  private static boolean nativeZStandardLoaded = false;
  static {
    if (NativeCodeLoader.isNativeCodeLoaded()) {
      try {
        // Initialize the native library
        initIDs();
        nativeZStandardLoaded = true;
      } catch (Throwable t) {
        LOG.warn("Error loading zstandard native libraries: " + t);
      }
    }
  }
  public static boolean isNativeCodeLoaded() {
    return nativeZStandardLoaded;
  }
  public static int getRecommendedBufferSize() {
    return getStreamSize();
  }
  public ZStandardDecompressor() {
    this(getStreamSize());
  }
  /**
   * Creates a new decompressor.
   *
   * @param bufferSize size in bytes of the internal direct buffers
   */
  public ZStandardDecompressor(int bufferSize) {
    this.directBufferSize = bufferSize;
    compressedDirectBuf = ByteBuffer.allocateDirect(directBufferSize);
    uncompressedDirectBuf = ByteBuffer.allocateDirect(directBufferSize);
    uncompressedDirectBuf.position(directBufferSize);
    stream = create();
    reset();
  }
  @Override
  public void setInput(byte[] b, int off, int len) {
    if (b == null) {
      throw new NullPointerException();
    }
    if (off < 0 || len < 0 || off > b.length - len) {
      throw new ArrayIndexOutOfBoundsException();
    }
    this.userBuf = b;
    this.userBufOff = off;
    this.userBufferBytesToConsume = len;
    setInputFromSavedData();
    uncompressedDirectBuf.limit(directBufferSize);
    uncompressedDirectBuf.position(directBufferSize);
  }
  private void setInputFromSavedData() {
    compressedDirectBufOff = 0;
    bytesInCompressedBuffer = userBufferBytesToConsume;
    if (bytesInCompressedBuffer > directBufferSize) {
      bytesInCompressedBuffer = directBufferSize;
    }
    compressedDirectBuf.rewind();
    compressedDirectBuf.put(
        userBuf, userBufOff, bytesInCompressedBuffer);
    userBufOff += bytesInCompressedBuffer;
    userBufferBytesToConsume -= bytesInCompressedBuffer;
  }
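  // Data flow (for orientation): setInput() stashes the caller's byte[] as the
  // "user buffer"; setInputFromSavedData() then copies at most directBufferSize
  // bytes of it into the direct compressedDirectBuf, which is the buffer the
  // native decompression call actually reads from.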
  // dictionary is not supported
  @Override
  public void setDictionary(byte[] b, int off, int len) {
    throw new UnsupportedOperationException(
        "Dictionary support is not enabled");
  }
  @Override
  public boolean needsInput() {
    // Consume remaining compressed data?
    if (uncompressedDirectBuf.remaining() > 0) {
      return false;
    }
    // Check if we have consumed all input
    if (bytesInCompressedBuffer - compressedDirectBufOff <= 0) {
      // Check if we have consumed all user-input
      if (userBufferBytesToConsume <= 0) {
        return true;
      } else {
        setInputFromSavedData();
      }
    }
    return false;
  }
  // dictionary is not supported.
  @Override
  public boolean needsDictionary() {
    return false;
  }
  @Override
  public boolean finished() {
    // finished == true if ZSTD_decompressStream() returns 0
    // also check we have nothing left in our buffer
    return (finished && uncompressedDirectBuf.remaining() == 0);
  }
  @Override
  public int decompress(byte[] b, int off, int len)
      throws IOException {
    checkStream();
    if (b == null) {
      throw new NullPointerException();
    }
    if (off < 0 || len < 0 || off > b.length - len) {
      throw new ArrayIndexOutOfBoundsException();
    }
    // Check if there is uncompressed data
    int n = uncompressedDirectBuf.remaining();
    if (n > 0) {
      return populateUncompressedBuffer(b, off, len, n);
    }
    // Re-initialize the output direct buffer
    uncompressedDirectBuf.rewind();
    uncompressedDirectBuf.limit(directBufferSize);
    // Decompress data
    n = inflateBytesDirect(
        compressedDirectBuf,
        compressedDirectBufOff,
        bytesInCompressedBuffer,
        uncompressedDirectBuf,
        0,
        directBufferSize
    );
    uncompressedDirectBuf.limit(n);
    // Get at most 'len' bytes
    return populateUncompressedBuffer(b, off, len, n);
  }
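  // Illustrative (hypothetical) caller loop for decompress(), similar to what
  // a compression input stream would do:
  //   decompressor.setInput(compressed, 0, compressedLen);
  //   while (!decompressor.finished()) {
  //     int n = decompressor.decompress(out, 0, out.length);
  //     // consume n bytes of 'out'; feed more input when needsInput() is true
  //   }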
  /**
   * <p>Returns the number of bytes remaining in the input buffers;
   * normally called when finished() is true to determine amount of post-stream
   * data.</p>
   *
   * @return the total (non-negative) number of unprocessed bytes in input
   */
  @Override
  public int getRemaining() {
    checkStream();
    // userBuf + compressedDirectBuf
    return userBufferBytesToConsume + remaining;
  }
  /**
   * Resets everything including the input buffers (user and direct).
   */
  @Override
  public void reset() {
    checkStream();
    init(stream);
    remaining = 0;
    finished = false;
    compressedDirectBufOff = 0;
    bytesInCompressedBuffer = 0;
    uncompressedDirectBuf.limit(directBufferSize);
    uncompressedDirectBuf.position(directBufferSize);
    userBufOff = 0;
    userBufferBytesToConsume = 0;
  }
  @Override
  public void end() {
    if (stream != 0) {
      free(stream);
      stream = 0;
    }
  }
  @Override
  protected void finalize() {
    reset();
  }
  private void checkStream() {
    if (stream == 0) {
      throw new NullPointerException("Stream not initialized");
    }
  }
  private int populateUncompressedBuffer(byte[] b, int off, int len, int n) {
    n = Math.min(n, len);
    uncompressedDirectBuf.get(b, off, n);
    return n;
  }
  private native static void initIDs();
  private native static long create();
  private native static void init(long stream);
  private native int inflateBytesDirect(ByteBuffer src, int srcOffset,
      int srcLen, ByteBuffer dst, int dstOffset, int dstLen);
  private native static void free(long strm);
  private native static int getStreamSize();
  int inflateDirect(ByteBuffer src, ByteBuffer dst) throws IOException {
    assert
        (this instanceof ZStandardDecompressor.ZStandardDirectDecompressor);
    int originalPosition = dst.position();
    int n = inflateBytesDirect(
        src, src.position(), src.remaining(), dst, dst.position(),
        dst.remaining()
    );
    dst.position(originalPosition + n);
    if (bytesInCompressedBuffer > 0) {
      src.position(compressedDirectBufOff);
    } else {
      src.position(src.limit());
    }
    return n;
  }
  /**
   * A {@link DirectDecompressor} for ZStandard
   * https://github.com/facebook/zstd.
   */
  public static class ZStandardDirectDecompressor
      extends ZStandardDecompressor implements DirectDecompressor {
    public ZStandardDirectDecompressor(int directBufferSize) {
      super(directBufferSize);
    }
    @Override
    public boolean finished() {
      return (endOfInput && super.finished());
    }
    @Override
    public void reset() {
      super.reset();
      endOfInput = true;
    }
    private boolean endOfInput;
    @Override
    public void decompress(ByteBuffer src, ByteBuffer dst)
        throws IOException {
      assert dst.isDirect() : "dst.isDirect()";
      assert src.isDirect() : "src.isDirect()";
      assert dst.remaining() > 0 : "dst.remaining() > 0";
      this.inflateDirect(src, dst);
      endOfInput = !src.hasRemaining();
    }
    @Override
    public void setDictionary(byte[] b, int off, int len) {
      throw new UnsupportedOperationException(
          "byte[] arrays are not supported for DirectDecompressor");
    }
    @Override
    public int decompress(byte[] b, int off, int len) {
      throw new UnsupportedOperationException(
          "byte[] arrays are not supported for DirectDecompressor");
    }
  }
}
 
 | 
	apache_hadoop 
 | 
	2017-01-28 
 | 
	312b36d113d83640b92c62fdd91ede74bd04c00f 
 | 
					
	hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/LogAggregationUtils.java 
 | 61 
							 | 
	/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements.  See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership.  The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License.  You may obtain a copy of the License at
*
*     http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.logaggregation;
import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import com.google.common.annotations.VisibleForTesting;
import java.io.IOException;
@Private
public class LogAggregationUtils {
  public static final String TMP_FILE_SUFFIX = ".tmp";
  /**
   * Constructs the full filename for an application's log file per node.
   * @param remoteRootLogDir the remote root log directory
   * @param appId the application id
   * @param user the application owner
   * @param nodeId the node id
   * @param suffix the log directory suffix
   * @return the remote log file.
   */
  public static Path getRemoteNodeLogFileForApp(Path remoteRootLogDir,
      ApplicationId appId, String user, NodeId nodeId, String suffix) {
    return new Path(getRemoteAppLogDir(remoteRootLogDir, appId, user, suffix),
        getNodeString(nodeId));
  }
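  // Example for getRemoteNodeLogFileForApp (hypothetical values):
  //   remoteRootLogDir=/tmp/logs, user=alice, suffix=logs,
  //   appId=application_1_0001, nodeId=host1:1234
  // resolves to /tmp/logs/alice/logs/application_1_0001/host1_1234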
  /**
   * Gets the remote app log dir.
   * @param remoteRootLogDir the remote root log directory
   * @param appId the application id
   * @param user the application owner
   * @param suffix the log directory suffix
   * @return the remote application specific log dir.
   */
  public static Path getRemoteAppLogDir(Path remoteRootLogDir,
      ApplicationId appId, String user, String suffix) {
    return new Path(getRemoteLogSuffixedDir(remoteRootLogDir, user, suffix),
        appId.toString());
  }
  /**
   * Gets the remote suffixed log dir for the user.
   * @param remoteRootLogDir the remote root log directory
   * @param user the application owner
   * @param suffix the log directory suffix
   * @return the remote suffixed log dir.
   */
  public static Path getRemoteLogSuffixedDir(Path remoteRootLogDir,
      String user, String suffix) {
    if (suffix == null || suffix.isEmpty()) {
      return getRemoteLogUserDir(remoteRootLogDir, user);
    }
    // TODO Maybe support suffix to be more than a single file.
    return new Path(getRemoteLogUserDir(remoteRootLogDir, user), suffix);
  }
  /**
   * Gets the remote log user dir.
   * @param remoteRootLogDir the remote root log directory
   * @param user the application owner
   * @return the remote per user log dir.
   */
  public static Path getRemoteLogUserDir(Path remoteRootLogDir, String user) {
    return new Path(remoteRootLogDir, user);
  }
  /**
   * Returns the suffix component of the log dir.
   * @param conf the configuration
   * @return the suffix which will be appended to the user log dir.
   */
  public static String getRemoteNodeLogDirSuffix(Configuration conf) {
    return conf.get(YarnConfiguration.NM_REMOTE_APP_LOG_DIR_SUFFIX,
        YarnConfiguration.DEFAULT_NM_REMOTE_APP_LOG_DIR_SUFFIX);
  }
  
  /**
   * Converts a nodeId to a form used in the app log file name.
   * @param nodeId the node id
   * @return the node string to be used to construct the file name.
   */
  @VisibleForTesting
  public static String getNodeString(NodeId nodeId) {
    return nodeId.toString().replace(":", "_");
  }
  @VisibleForTesting
  public static String getNodeString(String nodeId) {
    return nodeId.toString().replace(":", "_");
  }
  /**
   * Return the remote application log directory.
   * @param conf the configuration
   * @param appId the application
   * @param appOwner the application owner
   * @return the remote application log directory path
   * @throws IOException if the remote application log directory cannot be found
   */
  public static org.apache.hadoop.fs.Path getRemoteAppLogDir(
      Configuration conf, ApplicationId appId, String appOwner)
      throws IOException {
    String suffix = LogAggregationUtils.getRemoteNodeLogDirSuffix(conf);
    org.apache.hadoop.fs.Path remoteRootLogDir =
        new org.apache.hadoop.fs.Path(conf.get(
            YarnConfiguration.NM_REMOTE_APP_LOG_DIR,
            YarnConfiguration.DEFAULT_NM_REMOTE_APP_LOG_DIR));
    org.apache.hadoop.fs.Path remoteAppDir = null;
    if (appOwner == null) {
      org.apache.hadoop.fs.Path qualifiedRemoteRootLogDir =
          FileContext.getFileContext(conf).makeQualified(remoteRootLogDir);
      FileContext fc = FileContext.getFileContext(
          qualifiedRemoteRootLogDir.toUri(), conf);
      org.apache.hadoop.fs.Path toMatch = LogAggregationUtils
          .getRemoteAppLogDir(remoteRootLogDir, appId, "*", suffix);
      FileStatus[] matching  = fc.util().globStatus(toMatch);
      if (matching == null || matching.length != 1) {
        throw new IOException("Can not find remote application directory for "
            + "the application:" + appId);
      }
      remoteAppDir = matching[0].getPath();
    } else {
      remoteAppDir = LogAggregationUtils.getRemoteAppLogDir(
          remoteRootLogDir, appId, appOwner, suffix);
    }
    return remoteAppDir;
  }
  /**
   * Get all available log files under remote app log directory.
   * @param conf the configuration
   * @param appId the applicationId
   * @param appOwner the application owner
   * @return the iterator of available log files
   * @throws IOException if there is no log file available
   */
  public static RemoteIterator<FileStatus> getRemoteNodeFileDir(
      Configuration conf, ApplicationId appId, String appOwner)
      throws IOException {
    Path remoteAppLogDir = getRemoteAppLogDir(conf, appId, appOwner);
    Path qualifiedLogDir =
        FileContext.getFileContext(conf).makeQualified(remoteAppLogDir);
    RemoteIterator<FileStatus> nodeFiles = FileContext.getFileContext(
        qualifiedLogDir.toUri(), conf).listStatus(remoteAppLogDir);
    return nodeFiles;
  }
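  // Illustrative sketch (not part of the original class): how the path helpers
  // above compose. All values below are hypothetical.
  //
  //   Path root = new Path("/tmp/logs");
  //   ApplicationId appId = ApplicationId.newInstance(1485561600000L, 1);
  //   NodeId nodeId = NodeId.newInstance("nm-host", 8041);
  //   Path p = LogAggregationUtils.getRemoteNodeLogFileForApp(
  //       root, appId, "alice", nodeId, "logs");
  //   // p -> /tmp/logs/alice/logs/application_1485561600000_0001/nm-host_8041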
}
 
 | 
	apache_hadoop 
 | 
	2017-01-28 
 | 
	312b36d113d83640b92c62fdd91ede74bd04c00f 
 | 
					
	hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/SlowNodeDetector.java 
 | 194 
							 | 
	/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.server.datanode.metrics;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.ImmutableMap;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
 * A utility class to help detect nodes whose aggregate latency
 * is an outlier within a given set.
 *
 * We use the median absolute deviation for outlier detection as
 * described in the following publication:
 *
 * Leys, C., et al., Detecting outliers: Do not use standard deviation
 * around the mean, use absolute deviation around the median.
 * http://dx.doi.org/10.1016/j.jesp.2013.03.013
 *
 * We augment the above scheme with the following heuristics to be even
 * more conservative:
 *
 *  1. Skip outlier detection if the sample size is too small.
 *  2. Never flag nodes whose aggregate latency is below a low threshold.
 *  3. Never flag nodes whose aggregate latency is less than a small
 *     multiple of the median.
 */
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class SlowNodeDetector {
  public static final Logger LOG =
      LoggerFactory.getLogger(SlowNodeDetector.class);
  /**
   * Minimum number of peers to run outlier detection.
   */
  private static long minOutlierDetectionPeers = 10;
  /**
   * The multiplier is from Leys, C. et al.
   */
  private static final double MAD_MULTIPLIER = (double) 1.4826;
  /**
   * Threshold in milliseconds below which a DataNode is definitely not slow.
   */
  private final long lowThresholdMs;
  /**
   * Deviation multiplier. A sample is considered to be an outlier if it
   * exceeds the median by (multiplier * median abs. deviation). 3 is a
   * conservative choice.
   */
  private static final int DEVIATION_MULTIPLIER = 3;
  /**
   * If most of the samples are clustered together, the MAD can be
   * low. The median multiplier introduces another safeguard to avoid
   * overaggressive outlier detection.
   */
  @VisibleForTesting
  static final int MEDIAN_MULTIPLIER = 3;
  public SlowNodeDetector(long lowThresholdMs) {
    this.lowThresholdMs = lowThresholdMs;
  }
  /**
   * Return a set of DataNodes whose latency is much higher than
   * their peers. The input is a map of (node -> aggregate latency)
   * entries.
   *
   * The aggregate may be an arithmetic mean or a percentile e.g.
   * 90th percentile. Percentiles are a better choice than the median,
   * since latency is usually not normally distributed.
   *
   * This method allocates O(n) temporary memory and
   * runs in O(n log n) time, where n = stats.size().
   *
   * @param stats map of node to aggregate latency
   * @return map of node to aggregate latency for the nodes flagged as outliers
   */
  public Map<String, Double> getOutliers(Map<String, Double> stats) {
    if (stats.size() < minOutlierDetectionPeers) {
      LOG.debug("Skipping statistical outlier detection as we don't have " +
              "latency data for enough peers. Have {}, need at least {}",
          stats.size(), minOutlierDetectionPeers);
      return ImmutableMap.of();
    }
    // Compute the median absolute deviation of the aggregates.
    final List<Double> sorted = new ArrayList<>(stats.values());
    Collections.sort(sorted);
    final Double median = computeMedian(sorted);
    final Double mad = computeMad(sorted);
    Double upperLimitLatency = Math.max(
        lowThresholdMs, median * MEDIAN_MULTIPLIER);
    upperLimitLatency = Math.max(
        upperLimitLatency, median + (DEVIATION_MULTIPLIER * mad));
    final Map<String, Double> slowNodes = new HashMap<>();
    LOG.trace("getOutliers: List={}, MedianLatency={}, " +
        "MedianAbsoluteDeviation={}, upperLimitLatency={}",
        sorted, median, mad, upperLimitLatency);
    // Find nodes whose latency exceeds the threshold.
    for (Map.Entry<String, Double> entry : stats.entrySet()) {
      if (entry.getValue() > upperLimitLatency) {
        slowNodes.put(entry.getKey(), entry.getValue());
      }
    }
    return slowNodes;
  }
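  // Worked example (hypothetical numbers), assuming lowThresholdMs = 5:
  // ten peers report latencies {1, 1, 1, 1, 1, 1, 1, 1, 1, 10} (sorted).
  // median = 1; deviations from the median are {0, ..., 0, 9}, whose median
  // is 0, so MAD = 0 * 1.4826 = 0. The upper limit becomes
  //   max(lowThresholdMs, median * MEDIAN_MULTIPLIER, median + 3 * MAD)
  //   = max(5, 3, 1) = 5,
  // and only the peer reporting 10 is returned as an outlier.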
  /**
   * Compute the Median Absolute Deviation of a sorted list.
   */
  public static Double computeMad(List<Double> sortedValues) {
    if (sortedValues.size() == 0) {
      throw new IllegalArgumentException(
          "Cannot compute the Median Absolute Deviation " +
              "of an empty list.");
    }
    // First get the median of the values.
    Double median = computeMedian(sortedValues);
    List<Double> deviations = new ArrayList<>(sortedValues);
    // Then update the list to store deviation from the median.
    for (int i = 0; i < sortedValues.size(); ++i) {
      deviations.set(i, Math.abs(sortedValues.get(i) - median));
    }
    // Finally get the median absolute deviation.
    Collections.sort(deviations);
    return computeMedian(deviations) * MAD_MULTIPLIER;
  }
  /**
   * Compute the median of a sorted list.
   */
  public static Double computeMedian(List<Double> sortedValues) {
    if (sortedValues.size() == 0) {
      throw new IllegalArgumentException(
          "Cannot compute the median of an empty list.");
    }
    Double median = sortedValues.get(sortedValues.size() / 2);
    if (sortedValues.size() % 2 == 0) {
      median += sortedValues.get((sortedValues.size() / 2) - 1);
      median /= 2;
    }
    return median;
  }
  /**
   * This method *must not* be used outside of unit tests.
   */
  @VisibleForTesting
  static void setMinOutlierDetectionPeers(long minOutlierDetectionPeers) {
    SlowNodeDetector.minOutlierDetectionPeers = minOutlierDetectionPeers;
  }
  @VisibleForTesting
  static long getMinOutlierDetectionPeers() {
    return minOutlierDetectionPeers;
  }
}
 
 | 
	apache_hadoop 
 | 
	2017-01-28 
 | 
	312b36d113d83640b92c62fdd91ede74bd04c00f 
 | 
					
	hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/placement/SchedulingPlacementSet.java 
 | 68 
							 | 
	/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement;
import org.apache.hadoop.yarn.api.records.ResourceRequest;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.NodeType;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNode;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.SchedulingMode;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.PendingAsk;
import org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey;
import java.util.Collection;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
/**
 * <p>
 * Compared to {@link PlacementSet}, this also maintains
 * pending ResourceRequests:
 * - when new ResourceRequest(s) are added to the scheduler, or
 * - when a new container is allocated, the scheduler can notify the
 * corresponding PlacementSet.
 * </p>
 *
 * <p>
 * Each set of resource requests (e.g., resource requests with the
 * same schedulerKey) has one instance of PlacementSet, and each PlacementSet
 * can order nodes differently depending on the requests.
 * </p>
 */
public interface SchedulingPlacementSet<N extends SchedulerNode> {
  /**
   * Get an iterator of preferred nodes depending on requirements and/or
   * availability.
   * @param clusterPlacementSet input cluster PlacementSet
   * @return iterator of preferred nodes
   */
  Iterator<N> getPreferredNodeIterator(PlacementSet<N> clusterPlacementSet);
  /**
   * Replace existing ResourceRequests with the new requests.
   *
   * @param requests new ResourceRequests
   * @param recoverPreemptedRequestForAContainer if we're recovering resource
   * requests for a preempted container
   * @return the update result, including whether the total pending resources
   * changed
   */
  ResourceRequestUpdateResult updateResourceRequests(
      Collection<ResourceRequest> requests,
      boolean recoverPreemptedRequestForAContainer);
  /**
   * Get the pending ResourceRequests for this placement set's scheduler key.
   * @return Map of resourceName to ResourceRequest
   */
  Map<String, ResourceRequest> getResourceRequests();
  /**
   * Get pending ask for given resourceName. If there's no such pendingAsk,
   * returns {@link PendingAsk#ZERO}
   *
   * @param resourceName resourceName
   * @return PendingAsk
   */
  PendingAsk getPendingAsk(String resourceName);
  /**
   * Get #pending-allocations for given resourceName. If there's no such
   * pendingAsk, returns 0
   *
   * @param resourceName resourceName
   * @return #pending-allocations
   */
  int getOutstandingAsksCount(String resourceName);
  /**
   * Notify container allocated.
   * @param schedulerKey SchedulerRequestKey for this ResourceRequest
   * @param type Type of the allocation
   * @param node Which node this container allocated on
   * @return list of ResourceRequests deducted
   */
  List<ResourceRequest> allocate(SchedulerRequestKey schedulerKey,
      NodeType type, SchedulerNode node);
  /**
   * Returns list of accepted resourceNames.
   * @return Iterator of accepted resourceNames
   */
  Iterator<String> getAcceptedResouceNames();
  /**
   * Whether we still have a pending requirement for a given NodeType and node.
   * @param type Locality Type
   * @param node which node we will allocate on
   * @return true if we have a pending requirement
   */
  boolean canAllocate(NodeType type, SchedulerNode node);
  /**
   * Can we delay scheduling to achieve locality for this resourceName?
   * TODO (wangda): This should be moved out of SchedulingPlacementSet
   * and should belong to a specific delay scheduling policy impl.
   *
   * @param resourceName resourceName
   * @return true if allocation for this resourceName can be delayed
   */
  boolean canDelayTo(String resourceName);
  /**
   * Does this {@link SchedulingPlacementSet} accept resources on nodePartition?
   *
   * @param nodePartition nodePartition
   * @param schedulingMode schedulingMode
   * @return accepted/not
   */
  boolean acceptNodePartition(String nodePartition,
      SchedulingMode schedulingMode);
  /**
 * It is possible that one request can accept multiple node partitions,
 * so this method returns the primary node partition used for pending
 * resource / headroom calculation.
   *
   * @return primary requested node partition
   */
  String getPrimaryRequestedNodePartition();
  /**
   * @return number of unique location asks with #pending greater than 0,
   * (like /rack1, host1, etc.).
   *
   * TODO (wangda): This should be moved out of SchedulingPlacementSet
   * and should belong to specific delay scheduling policy impl.
   */
  int getUniqueLocationAsks();
  /**
   * Print human-readable requests to LOG debug.
   */
  void showRequests();
}
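// Hypothetical call flow for an implementation (a sketch, not part of the
// original interface): a scheduler typically wires these methods together
// roughly as follows.
//
//   SchedulingPlacementSet<SchedulerNode> ps = ...;   // implementation-specific
//   ps.updateResourceRequests(requests, false);       // record the new asks
//   if (ps.getOutstandingAsksCount(ResourceRequest.ANY) > 0
//       && ps.canAllocate(NodeType.NODE_LOCAL, node)) {
//     List<ResourceRequest> deducted =
//         ps.allocate(schedulerKey, NodeType.NODE_LOCAL, node);
//   }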
 
 | 
	apache_hadoop 
 | 
	2017-01-28 
 | 
	312b36d113d83640b92c62fdd91ede74bd04c00f 
 | 
					
	hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSPreemptionThread.java 
 | 114 
							 | 
	/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceRequest;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerEventType;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.placement.SchedulingPlacementSet;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.PendingAsk;
import org.apache.hadoop.yarn.util.resource.Resources;
import java.util.ArrayList;
import java.util.List;
import java.util.Timer;
import java.util.TimerTask;
/**
 * Thread that handles FairScheduler preemption.
 */
class FSPreemptionThread extends Thread {
  private static final Log LOG = LogFactory.getLog(FSPreemptionThread.class);
  protected final FSContext context;
  private final FairScheduler scheduler;
  private final long warnTimeBeforeKill;
  private final Timer preemptionTimer;
  FSPreemptionThread(FairScheduler scheduler) {
    this.scheduler = scheduler;
    this.context = scheduler.getContext();
    FairSchedulerConfiguration fsConf = scheduler.getConf();
    context.setPreemptionEnabled();
    context.setPreemptionUtilizationThreshold(
        fsConf.getPreemptionUtilizationThreshold());
    warnTimeBeforeKill = fsConf.getWaitTimeBeforeKill();
    preemptionTimer = new Timer("Preemption Timer", true);
    setDaemon(true);
    setName("FSPreemptionThread");
  }
  @Override
  public void run() {
    while (!Thread.interrupted()) {
      FSAppAttempt starvedApp;
      try {
        starvedApp = context.getStarvedApps().take();
        if (!Resources.isNone(starvedApp.getStarvation())) {
          PreemptableContainers containers =
              identifyContainersToPreempt(starvedApp);
          if (containers != null) {
            preemptContainers(containers.containers);
          }
        }
      } catch (InterruptedException e) {
        LOG.info("Preemption thread interrupted! Exiting.");
        return;
      }
    }
  }
  /**
   * Given an app, identify containers to preempt to satisfy the app's next
   * resource request.
   *
   * @param starvedApp starved application for which we are identifying
   *                   preemption targets
   * @return list of containers to preempt to satisfy starvedApp, null if the
   * app cannot be satisfied by preempting any running containers
   */
  private PreemptableContainers identifyContainersToPreempt(
      FSAppAttempt starvedApp) {
    PreemptableContainers bestContainers = null;
    // Find the nodes that match the next resource request
    SchedulingPlacementSet nextPs =
        starvedApp.getAppSchedulingInfo().getFirstSchedulingPlacementSet();
    PendingAsk firstPendingAsk = nextPs.getPendingAsk(ResourceRequest.ANY);
    // TODO (KK): Should we check other resource requests if we can't match
    // the first one?
    Resource requestCapability = firstPendingAsk.getPerAllocationResource();
    List<FSSchedulerNode> potentialNodes =
        scheduler.getNodeTracker().getNodesByResourceName(
            nextPs.getAcceptedResouceNames().next().toString());
    // From the potential nodes, pick a node that has enough containers
    // from apps over their fairshare
    for (FSSchedulerNode node : potentialNodes) {
      // TODO (YARN-5829): Attempt to reserve the node for starved app. The
      // subsequent if-check needs to be reworked accordingly.
      FSAppAttempt nodeReservedApp = node.getReservedAppSchedulable();
      if (nodeReservedApp != null && !nodeReservedApp.equals(starvedApp)) {
        // This node is already reserved by another app. Let us not consider
        // this for preemption.
        continue;
      }
      int maxAMContainers = bestContainers == null ?
          Integer.MAX_VALUE : bestContainers.numAMContainers;
      PreemptableContainers preemptableContainers =
          identifyContainersToPreemptOnNode(requestCapability, node,
              maxAMContainers);
      if (preemptableContainers != null) {
        if (preemptableContainers.numAMContainers == 0) {
          return preemptableContainers;
        } else {
          bestContainers = preemptableContainers;
        }
      }
    }
    return bestContainers;
  }
  /**
   * Identify containers to preempt on a given node. Try to find a list with
   * least AM containers to avoid preempting AM containers. This method returns
   * a non-null set of containers only if the number of AM containers is less
   * than maxAMContainers.
   *
   * @param request resource requested
   * @param node the node to check
   * @param maxAMContainers max allowed AM containers in the set
   * @return list of preemptable containers with fewer AM containers than
   *         maxAMContainers if such a list exists; null otherwise.
   */
  private PreemptableContainers identifyContainersToPreemptOnNode(
      Resource request, FSSchedulerNode node, int maxAMContainers) {
    PreemptableContainers preemptableContainers =
        new PreemptableContainers(maxAMContainers);
    // Figure out list of containers to consider
    List<RMContainer> containersToCheck =
        node.getRunningContainersWithAMsAtTheEnd();
    containersToCheck.removeAll(node.getContainersForPreemption());
    // Initialize potential with unallocated resources
    Resource potential = Resources.clone(node.getUnallocatedResource());
    for (RMContainer container : containersToCheck) {
      FSAppAttempt app =
          scheduler.getSchedulerApp(container.getApplicationAttemptId());
      if (app.canContainerBePreempted(container)) {
        // Flag container for preemption
        if (!preemptableContainers.addContainer(container)) {
          return null;
        }
        Resources.addTo(potential, container.getAllocatedResource());
      }
      // Check if we have already identified enough containers
      if (Resources.fitsIn(request, potential)) {
        return preemptableContainers;
      } else {
        // TODO (YARN-5829): Unreserve the node for the starved app.
      }
    }
    return null;
  }
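  // Illustrative walk-through (hypothetical numbers): if the request is
  // <4 GB, 4 vcores>, the node has <1 GB, 1 vcore> unallocated, and the first
  // two preemptable containers on the node hold <2 GB, 2 vcores> each, then
  // "potential" grows 1 -> 3 -> 5 GB (and 1 -> 3 -> 5 vcores), fitsIn()
  // succeeds after the second container, and those two containers are
  // returned for preemption.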
  private void preemptContainers(List<RMContainer> containers) {
    // Mark the containers as being considered for preemption on the node.
    // Make sure the containers are subsequently removed by calling
    // FSSchedulerNode#removeContainerForPreemption.
    if (containers.size() > 0) {
      FSSchedulerNode node = (FSSchedulerNode) scheduler.getNodeTracker()
          .getNode(containers.get(0).getNodeId());
      node.addContainersForPreemption(containers);
    }
    // Warn application about containers to be killed
    for (RMContainer container : containers) {
      ApplicationAttemptId appAttemptId = container.getApplicationAttemptId();
      FSAppAttempt app = scheduler.getSchedulerApp(appAttemptId);
      FSLeafQueue queue = app.getQueue();
      LOG.info("Preempting container " + container +
          " from queue " + queue.getName());
      app.trackContainerForPreemption(container);
    }
    // Schedule timer task to kill containers
    preemptionTimer.schedule(
        new PreemptContainersTask(containers), warnTimeBeforeKill);
  }
  private class PreemptContainersTask extends TimerTask {
    private List<RMContainer> containers;
    PreemptContainersTask(List<RMContainer> containers) {
      this.containers = containers;
    }
    @Override
    public void run() {
      for (RMContainer container : containers) {
        ContainerStatus status = SchedulerUtils.createPreemptedContainerStatus(
            container.getContainerId(), SchedulerUtils.PREEMPTED_CONTAINER);
        LOG.info("Killing container " + container);
        scheduler.completedContainer(
            container, status, RMContainerEventType.KILL);
        FSSchedulerNode containerNode = (FSSchedulerNode)
            scheduler.getNodeTracker().getNode(container.getAllocatedNode());
        containerNode.removeContainerForPreemption(container);
      }
    }
  }
  /**
   * A class to track preemptable containers.
   */
  private static class PreemptableContainers {
    List<RMContainer> containers;
    int numAMContainers;
    int maxAMContainers;
    PreemptableContainers(int maxAMContainers) {
      containers = new ArrayList<>();
      numAMContainers = 0;
      this.maxAMContainers = maxAMContainers;
    }
    /**
     * Add a container if the number of AM containers is less than
     * maxAMContainers.
     *
     * @param container the container to add
     * @return true if success; false otherwise
     */
    private boolean addContainer(RMContainer container) {
      if (container.isAMContainer()) {
        numAMContainers++;
        if (numAMContainers >= maxAMContainers) {
          return false;
        }
      }
      containers.add(container);
      return true;
    }
  }
}
 
 | 
	apache_hadoop 
 | 
	2017-01-28 
 | 
	312b36d113d83640b92c62fdd91ede74bd04c00f 
 | 
					
	hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ContainerUpdateContext.java 
 | 267 
							 | 
	/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerUpdateType;
import org.apache.hadoop.yarn.api.records.ExecutionType;
import org.apache.hadoop.yarn.api.records.ExecutionTypeRequest;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceRequest;
import org.apache.hadoop.yarn.api.records.UpdateContainerError;
import org.apache.hadoop.yarn.api.records.UpdateContainerRequest;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
import org.apache.hadoop.yarn.server.scheduler.SchedulerRequestKey;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
/**
 * Class encapsulates all outstanding container increase and decrease
 * requests for an application.
 */
public class ContainerUpdateContext {
  public static final ContainerId UNDEFINED =
      ContainerId.newContainerId(ApplicationAttemptId.newInstance(
              ApplicationId.newInstance(-1, -1), -1), -1);
  protected static final RecordFactory RECORD_FACTORY =
      RecordFactoryProvider.getRecordFactory(null);
  // Keep track of containers that are undergoing an increase or promotion
  private final Map<SchedulerRequestKey, Map<Resource,
      Map<NodeId, Set<ContainerId>>>> outstandingIncreases = new HashMap<>();
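  // Nesting of the map above, for readability: schedulerKey -> requested
  // Resource -> NodeId -> set of ContainerIds currently awaiting an
  // increase/promotion of that size on that node.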
  private final Set<ContainerId> outstandingDecreases = new HashSet<>();
  private final AppSchedulingInfo appSchedulingInfo;
  ContainerUpdateContext(AppSchedulingInfo appSchedulingInfo) {
    this.appSchedulingInfo = appSchedulingInfo;
  }
  private synchronized boolean isBeingIncreased(Container container) {
    Map<Resource, Map<NodeId, Set<ContainerId>>> resourceMap =
        outstandingIncreases.get(
            new SchedulerRequestKey(container.getPriority(),
                container.getAllocationRequestId(), container.getId()));
    if (resourceMap != null) {
      Map<NodeId, Set<ContainerId>> locationMap =
          resourceMap.get(container.getResource());
      if (locationMap != null) {
        Set<ContainerId> containerIds = locationMap.get(container.getNodeId());
        if (containerIds != null && !containerIds.isEmpty()) {
          return containerIds.contains(container.getId());
        }
      }
    }
    return false;
  }
  /**
   * Add the container to outstanding decreases.
   * @param container Container.
   * @return true if the container was added to outstanding decreases.
   */
  public synchronized boolean checkAndAddToOutstandingDecreases(
      Container container) {
    if (isBeingIncreased(container)
        || outstandingDecreases.contains(container.getId())) {
      return false;
    }
    outstandingDecreases.add(container.getId());
    return true;
  }
  /**
   * Add the container to outstanding increases.
   * @param rmContainer RMContainer.
   * @param schedulerNode SchedulerNode.
   * @param updateRequest UpdateContainerRequest.
   * @return true if the container was added to outstanding increases.
   */
  public synchronized boolean checkAndAddToOutstandingIncreases(
      RMContainer rmContainer, SchedulerNode schedulerNode,
      UpdateContainerRequest updateRequest) {
    Container container = rmContainer.getContainer();
    SchedulerRequestKey schedulerKey =
        SchedulerRequestKey.create(updateRequest,
            rmContainer.getAllocatedSchedulerKey());
    Map<Resource, Map<NodeId, Set<ContainerId>>> resourceMap =
        outstandingIncreases.get(schedulerKey);
    if (resourceMap == null) {
      resourceMap = new HashMap<>();
      outstandingIncreases.put(schedulerKey, resourceMap);
    }
    Map<NodeId, Set<ContainerId>> locationMap =
        resourceMap.get(container.getResource());
    if (locationMap == null) {
      locationMap = new HashMap<>();
      resourceMap.put(container.getResource(), locationMap);
    }
    Set<ContainerId> containerIds = locationMap.get(container.getNodeId());
    if (containerIds == null) {
      containerIds = new HashSet<>();
      locationMap.put(container.getNodeId(), containerIds);
    }
    if (containerIds.contains(container.getId())
        || outstandingDecreases.contains(container.getId())) {
      return false;
    }
    containerIds.add(container.getId());
    Map<SchedulerRequestKey, Map<String, ResourceRequest>> updateResReqs =
        new HashMap<>();
    Resource resToIncrease = getResourceToIncrease(updateRequest, rmContainer);
    Map<String, ResourceRequest> resMap =
        createResourceRequests(rmContainer, schedulerNode,
            schedulerKey, resToIncrease);
    updateResReqs.put(schedulerKey, resMap);
    appSchedulingInfo.addToPlacementSets(false, updateResReqs);
    return true;
  }
  private Map<String, ResourceRequest> createResourceRequests(
      RMContainer rmContainer, SchedulerNode schedulerNode,
      SchedulerRequestKey schedulerKey, Resource resToIncrease) {
    Map<String, ResourceRequest> resMap = new HashMap<>();
    resMap.put(rmContainer.getContainer().getNodeId().getHost(),
        createResourceReqForIncrease(schedulerKey, resToIncrease,
            RECORD_FACTORY.newRecordInstance(ResourceRequest.class),
            rmContainer, rmContainer.getContainer().getNodeId().getHost()));
    resMap.put(schedulerNode.getRackName(),
        createResourceReqForIncrease(schedulerKey, resToIncrease,
            RECORD_FACTORY.newRecordInstance(ResourceRequest.class),
            rmContainer, schedulerNode.getRackName()));
    resMap.put(ResourceRequest.ANY,
        createResourceReqForIncrease(schedulerKey, resToIncrease,
            RECORD_FACTORY.newRecordInstance(ResourceRequest.class),
            rmContainer, ResourceRequest.ANY));
    return resMap;
  }
  private Resource getResourceToIncrease(UpdateContainerRequest updateReq,
      RMContainer rmContainer) {
    if (updateReq.getContainerUpdateType() ==
        ContainerUpdateType.PROMOTE_EXECUTION_TYPE) {
      return rmContainer.getContainer().getResource();
    }
    // TODO: Fix this for container increase..
    //       This has to equal the Resources in excess of fitsIn()
    //       for container increase and is equal to the container total
    //       resource for Promotion.
    return null;
  }
  private static ResourceRequest createResourceReqForIncrease(
      SchedulerRequestKey schedulerRequestKey, Resource resToIncrease,
      ResourceRequest rr, RMContainer rmContainer, String resourceName) {
    rr.setResourceName(resourceName);
    rr.setNumContainers(1);
    rr.setRelaxLocality(false);
    rr.setPriority(rmContainer.getContainer().getPriority());
    rr.setAllocationRequestId(schedulerRequestKey.getAllocationRequestId());
    rr.setCapability(resToIncrease);
    rr.setNodeLabelExpression(rmContainer.getNodeLabelExpression());
    rr.setExecutionTypeRequest(ExecutionTypeRequest.newInstance(
        ExecutionType.GUARANTEED, true));
    return rr;
  }
  /**
   * Remove Container from outstanding increases / decreases. Calling this
   * method essentially completes the update process.
   * @param schedulerKey SchedulerRequestKey.
   * @param container Container.
   */
  public synchronized void removeFromOutstandingUpdate(
      SchedulerRequestKey schedulerKey, Container container) {
    Map<Resource, Map<NodeId, Set<ContainerId>>> resourceMap =
        outstandingIncreases.get(schedulerKey);
    if (resourceMap != null) {
      Map<NodeId, Set<ContainerId>> locationMap =
          resourceMap.get(container.getResource());
      if (locationMap != null) {
        Set<ContainerId> containerIds = locationMap.get(container.getNodeId());
        if (containerIds != null && !containerIds.isEmpty()) {
          containerIds.remove(container.getId());
          if (containerIds.isEmpty()) {
            locationMap.remove(container.getNodeId());
          }
        }
        if (locationMap.isEmpty()) {
          resourceMap.remove(container.getResource());
        }
      }
      if (resourceMap.isEmpty()) {
        outstandingIncreases.remove(schedulerKey);
      }
    }
    outstandingDecreases.remove(container.getId());
  }
  /**
   * Check if a new container is to be matched up against an outstanding
   * Container increase request.
   * @param node SchedulerNode on which the container was allocated.
   * @param schedulerKey SchedulerRequestKey.
   * @param rmContainer RMContainer.
   * @return ContainerId of the matched outstanding increase, UNDEFINED if the
   * allocation must be released and re-requested, or null if there is no
   * outstanding increase for the schedulerKey.
   */
  public ContainerId matchContainerToOutstandingIncreaseReq(
      SchedulerNode node, SchedulerRequestKey schedulerKey,
      RMContainer rmContainer) {
    ContainerId retVal = null;
    Container container = rmContainer.getContainer();
    Map<Resource, Map<NodeId, Set<ContainerId>>> resourceMap =
        outstandingIncreases.get(schedulerKey);
    if (resourceMap != null) {
      Map<NodeId, Set<ContainerId>> locationMap =
          resourceMap.get(container.getResource());
      if (locationMap != null) {
        Set<ContainerId> containerIds = locationMap.get(container.getNodeId());
        if (containerIds != null && !containerIds.isEmpty()) {
          retVal = containerIds.iterator().next();
        }
      }
    }
    // Allocation happened on an NM on the same host, but not on the NM
    // we need. We need to signal that this container has to be released,
    // and add these requests back so they can be reallocated.
    if (resourceMap != null && retVal == null) {
      Map<SchedulerRequestKey, Map<String, ResourceRequest>> reqsToUpdate =
          new HashMap<>();
      Map<String, ResourceRequest> resMap = createResourceRequests(
          rmContainer, node, schedulerKey,
          rmContainer.getContainer().getResource());
      reqsToUpdate.put(schedulerKey, resMap);
      appSchedulingInfo.addToPlacementSets(true, reqsToUpdate);
      return UNDEFINED;
    }
    return retVal;
  }
}
 
 | 
	apache_hadoop 
 | 
	2017-01-28 
 | 
	312b36d113d83640b92c62fdd91ede74bd04c00f 
 | 
					
	hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/LogToolUtils.java 
 | 117 
							 | 
	/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.yarn.logaggregation;
import java.io.DataInputStream;
import java.io.EOFException;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.commons.math3.util.Pair;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogKey;
import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat.LogReader;
import org.apache.hadoop.yarn.util.Times;
/**
 * This class contains several utility functions which can be used by
 * different log tools.
 */
public final class LogToolUtils {
  private LogToolUtils() {}
  /**
   * Return a list of {@link ContainerLogMeta} for a container
   * from Remote FileSystem.
   *
   * @param conf the configuration
   * @param appId the applicationId
   * @param containerIdStr the containerId
   * @param nodeId the nodeId
   * @param appOwner the application owner
   * @return a list of {@link ContainerLogMeta}
   * @throws IOException if there is no available log file
   */
  public static List<ContainerLogMeta> getContainerLogMetaFromRemoteFS(
      Configuration conf, ApplicationId appId, String containerIdStr,
      String nodeId, String appOwner) throws IOException {
    List<ContainerLogMeta> containersLogMeta = new ArrayList<>();
    boolean getAllContainers = (containerIdStr == null);
    String nodeIdStr = (nodeId == null) ? null
        : LogAggregationUtils.getNodeString(nodeId);
    RemoteIterator<FileStatus> nodeFiles = LogAggregationUtils
        .getRemoteNodeFileDir(conf, appId, appOwner);
    if (nodeFiles == null) {
      throw new IOException("There is no available log fils for "
          + "application:" + appId);
    }
    while (nodeFiles.hasNext()) {
      FileStatus thisNodeFile = nodeFiles.next();
      if (nodeIdStr != null
          && !thisNodeFile.getPath().getName().contains(nodeIdStr)) {
        continue;
      }
      if (!thisNodeFile.getPath().getName()
          .endsWith(LogAggregationUtils.TMP_FILE_SUFFIX)) {
        AggregatedLogFormat.LogReader reader =
            new AggregatedLogFormat.LogReader(conf,
            thisNodeFile.getPath());
        try {
          DataInputStream valueStream;
          LogKey key = new LogKey();
          valueStream = reader.next(key);
          while (valueStream != null) {
            if (getAllContainers || (key.toString().equals(containerIdStr))) {
              ContainerLogMeta containerLogMeta = new ContainerLogMeta(
                  key.toString(), thisNodeFile.getPath().getName());
              while (true) {
                try {
                  Pair<String, String> logMeta =
                      LogReader.readContainerMetaDataAndSkipData(
                          valueStream);
                  containerLogMeta.addLogMeta(
                      logMeta.getFirst(),
                      logMeta.getSecond(),
                      Times.format(thisNodeFile.getModificationTime()));
                } catch (EOFException eof) {
                  break;
                }
              }
              containersLogMeta.add(containerLogMeta);
              if (!getAllContainers) {
                break;
              }
            }
            // Next container
            key = new LogKey();
            valueStream = reader.next(key);
          }
        } finally {
          reader.close();
        }
      }
    }
    return containersLogMeta;
  }
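  // Hypothetical usage (a sketch, values are illustrative): fetch log metadata
  // for every container of an application, on any node, owned by "alice".
  //
  //   List<ContainerLogMeta> metas =
  //       LogToolUtils.getContainerLogMetaFromRemoteFS(
  //           conf, appId, null /* all containers */, null /* any node */,
  //           "alice");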
}
 
 | 
	apache_hadoop 
 | 
	2017-01-28 
 | 
	312b36d113d83640b92c62fdd91ede74bd04c00f 
 | 
					
	hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Util.java 
 | 255 
							 | 
	/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.server.common;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.net.HttpURLConnection;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;
import java.security.DigestInputStream;
import java.security.MessageDigest;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import com.google.common.collect.Lists;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSUtilClient;
import org.apache.hadoop.hdfs.server.namenode.ImageServlet;
import org.apache.hadoop.hdfs.util.DataTransferThrottler;
import org.apache.hadoop.io.MD5Hash;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.util.Time;
import org.apache.hadoop.hdfs.web.URLConnectionFactory;
@InterfaceAudience.Private
public final class Util {
  private final static Log LOG = LogFactory.getLog(Util.class.getName());
  public final static String FILE_LENGTH = "File-Length";
  public final static String CONTENT_LENGTH = "Content-Length";
  public final static String MD5_HEADER = "X-MD5-Digest";
  public final static String CONTENT_TYPE = "Content-Type";
  public final static String CONTENT_TRANSFER_ENCODING = "Content-Transfer-Encoding";
  public final static int IO_FILE_BUFFER_SIZE;
  private static final boolean isSpnegoEnabled;
  public static final URLConnectionFactory connectionFactory;
  static {
    Configuration conf = new Configuration();
    connectionFactory = URLConnectionFactory
        .newDefaultURLConnectionFactory(conf);
    isSpnegoEnabled = UserGroupInformation.isSecurityEnabled();
    IO_FILE_BUFFER_SIZE = DFSUtilClient.getIoFileBufferSize(conf);
  }
  /**
   * Interprets the passed string as a URI. In case of error it 
   * assumes the specified string is a file.
   *
   * @param s the string to interpret
   * @return the resulting URI
   */
  static URI stringAsURI(String s) throws IOException {
    URI u = null;
    // try to make a URI
    try {
      u = new URI(s);
    } catch (URISyntaxException e){
      LOG.error("Syntax error in URI " + s
          + ". Please check hdfs configuration.", e);
    }
    // if URI is null or scheme is undefined, then assume it's file://
    if(u == null || u.getScheme() == null){
      LOG.warn("Path " + s + " should be specified as a URI "
          + "in configuration files. Please update hdfs configuration.");
      u = fileAsURI(new File(s));
    }
    return u;
  }
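  // Examples (illustrative): "hdfs://nn:8020/edits" is returned unchanged,
  // while a bare path such as "/data/dfs/name" (no scheme) is converted to a
  // "file:" URI via fileAsURI() and a warning is logged.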
  /**
   * Converts the passed File to a URI. This method trims the trailing slash if
   * one is appended because the underlying file is in fact a directory that
   * exists.
   * 
   * @param f the file to convert
   * @return the resulting URI
   */
  public static URI fileAsURI(File f) throws IOException {
    URI u = f.getCanonicalFile().toURI();
    
    // trim the trailing slash, if it's present
    if (u.getPath().endsWith("/")) {
      String uriAsString = u.toString();
      try {
        u = new URI(uriAsString.substring(0, uriAsString.length() - 1));
      } catch (URISyntaxException e) {
        throw new IOException(e);
      }
    }
    
    return u;
  }
  /**
   * Converts a collection of strings into a collection of URIs.
   * @param names collection of strings to convert to URIs
   * @return collection of URIs
   */
  public static List<URI> stringCollectionAsURIs(
                                  Collection<String> names) {
    List<URI> uris = new ArrayList<>(names.size());
    for(String name : names) {
      try {
        uris.add(stringAsURI(name));
      } catch (IOException e) {
        LOG.error("Error while processing URI: " + name, e);
      }
    }
    return uris;
  }
  /**
   * Downloads the files at the specified url location into destination
   * storage.
   */
  public static MD5Hash doGetUrl(URL url, List<File> localPaths,
      Storage dstStorage, boolean getChecksum, int timeout) throws IOException {
    HttpURLConnection connection;
    try {
      connection = (HttpURLConnection)
          connectionFactory.openConnection(url, isSpnegoEnabled);
    } catch (AuthenticationException e) {
      throw new IOException(e);
    }
    setTimeout(connection, timeout);
    if (connection.getResponseCode() != HttpURLConnection.HTTP_OK) {
      throw new HttpGetFailedException("Image transfer servlet at " + url +
              " failed with status code " + connection.getResponseCode() +
              "\nResponse message:\n" + connection.getResponseMessage(),
          connection);
    }
    long advertisedSize;
    String contentLength = connection.getHeaderField(CONTENT_LENGTH);
    if (contentLength != null) {
      advertisedSize = Long.parseLong(contentLength);
    } else {
      throw new IOException(CONTENT_LENGTH + " header is not provided " +
          "by the namenode when trying to fetch " + url);
    }
    MD5Hash advertisedDigest = parseMD5Header(connection);
    String fsImageName = connection
        .getHeaderField(ImageServlet.HADOOP_IMAGE_EDITS_HEADER);
    InputStream stream = connection.getInputStream();
    return receiveFile(url.toExternalForm(), localPaths, dstStorage,
        getChecksum, advertisedSize, advertisedDigest, fsImageName, stream,
        null);
  }
  /**
   * Receives file at the url location from the input stream and puts them in
   * the specified destination storage location.
   */
  public static MD5Hash receiveFile(String url, List<File> localPaths,
      Storage dstStorage, boolean getChecksum, long advertisedSize,
      MD5Hash advertisedDigest, String fsImageName, InputStream stream,
      DataTransferThrottler throttler) throws
      IOException {
    long startTime = Time.monotonicNow();
    Map<FileOutputStream, File> streamPathMap = new HashMap<>();
    StringBuilder xferStats = new StringBuilder();
    double xferCombined = 0;
    if (localPaths != null) {
      // If the local paths refer to directories, use the server-provided header
      // as the filename within that directory
      List<File> newLocalPaths = new ArrayList<>();
      for (File localPath : localPaths) {
        if (localPath.isDirectory()) {
          if (fsImageName == null) {
            throw new IOException("No filename header provided by server");
          }
          newLocalPaths.add(new File(localPath, fsImageName));
        } else {
          newLocalPaths.add(localPath);
        }
      }
      localPaths = newLocalPaths;
    }
    long received = 0;
    MessageDigest digester = null;
    if (getChecksum) {
      digester = MD5Hash.getDigester();
      stream = new DigestInputStream(stream, digester);
    }
    boolean finishedReceiving = false;
    List<FileOutputStream> outputStreams = Lists.newArrayList();
    try {
      if (localPaths != null) {
        for (File f : localPaths) {
          try {
            if (f.exists()) {
              LOG.warn("Overwriting existing file " + f
                  + " with file downloaded from " + url);
            }
            FileOutputStream fos = new FileOutputStream(f);
            outputStreams.add(fos);
            streamPathMap.put(fos, f);
          } catch (IOException ioe) {
            LOG.warn("Unable to download file " + f, ioe);
            // This will be null if we're downloading the fsimage to a file
            // outside of an NNStorage directory.
            if (dstStorage != null &&
                (dstStorage instanceof StorageErrorReporter)) {
              ((StorageErrorReporter)dstStorage).reportErrorOnFile(f);
            }
          }
        }
        if (outputStreams.isEmpty()) {
          throw new IOException(
              "Unable to download to any storage directory");
        }
      }
      int num = 1;
      byte[] buf = new byte[IO_FILE_BUFFER_SIZE];
      while (num > 0) {
        num = stream.read(buf);
        if (num > 0) {
          received += num;
          for (FileOutputStream fos : outputStreams) {
            fos.write(buf, 0, num);
          }
          if (throttler != null) {
            throttler.throttle(num);
          }
        }
      }
      finishedReceiving = true;
      double xferSec = Math.max(
          ((float)(Time.monotonicNow() - startTime)) / 1000.0, 0.001);
      long xferKb = received / 1024;
      xferCombined += xferSec;
      xferStats.append(
          String.format(" The fsimage download took %.2fs at %.2f KB/s.",
              xferSec, xferKb / xferSec));
    } finally {
      stream.close();
      for (FileOutputStream fos : outputStreams) {
        long flushStartTime = Time.monotonicNow();
        fos.getChannel().force(true);
        fos.close();
        // Elapsed time must be (now - flushStartTime); the reversed order
        // always yields a negative value and clamps to the 0.001s floor.
        double writeSec = Math.max(((float)
            (Time.monotonicNow() - flushStartTime)) / 1000.0, 0.001);
        xferCombined += writeSec;
        xferStats.append(String
            .format(" Synchronous (fsync) write to disk of " +
                streamPathMap.get(fos).getAbsolutePath() +
                " took %.2fs.", writeSec));
      }
      // Something went wrong and did not finish reading.
      // Remove the temporary files.
      if (!finishedReceiving) {
        deleteTmpFiles(localPaths);
      }
      if (finishedReceiving && received != advertisedSize) {
        // only throw this exception if we think we read all of it on our end
        // -- otherwise a client-side IOException would be masked by this
        // exception that makes it look like a server-side problem!
        deleteTmpFiles(localPaths);
        throw new IOException("File " + url + " received length " + received +
            " is not of the advertised size " +
            advertisedSize);
      }
    }
    xferStats.insert(0, String.format("Combined time for fsimage download and" +
        " fsync to all disks took %.2fs.", xferCombined));
    LOG.info(xferStats.toString());
    if (digester != null) {
      MD5Hash computedDigest = new MD5Hash(digester.digest());
      if (advertisedDigest != null &&
          !computedDigest.equals(advertisedDigest)) {
        deleteTmpFiles(localPaths);
        throw new IOException("File " + url + " computed digest " +
            computedDigest + " does not match advertised digest " +
            advertisedDigest);
      }
      return computedDigest;
    } else {
      return null;
    }
  }
  private static void deleteTmpFiles(List<File> files) {
    if (files == null) {
      return;
    }
    LOG.info("Deleting temporary files: " + files);
    for (File file : files) {
      if (!file.delete()) {
        LOG.warn("Deleting " + file + " has failed");
      }
    }
  }
  /**
   * Sets a timeout value in milliseconds for the Http connection.
   * @param connection the Http connection for which timeout needs to be set
   * @param timeout value to be set as timeout in milliseconds
   */
  public static void setTimeout(HttpURLConnection connection, int timeout) {
    if (timeout > 0) {
      connection.setConnectTimeout(timeout);
      connection.setReadTimeout(timeout);
    }
  }
  private static MD5Hash parseMD5Header(HttpURLConnection connection) {
    String header = connection.getHeaderField(MD5_HEADER);
    return (header != null) ? new MD5Hash(header) : null;
  }
}
 
 | 
	apache_hadoop 
 | 
	2017-01-28 
 | 
	312b36d113d83640b92c62fdd91ede74bd04c00f 
 | 
					
	hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SlowPeerTracker.java 
 | 273 
							 | 
	/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.server.blockmanagement;
import com.fasterxml.jackson.annotation.JsonProperty;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.ImmutableMap;
import com.google.common.primitives.Ints;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.util.Timer;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.Map;
import java.util.PriorityQueue;
import java.util.Set;
import java.util.SortedSet;
import java.util.TreeSet;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.TimeUnit;
/**
 * This class aggregates information from {@link SlowPeerReports} received via
 * heartbeats.
 */
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class SlowPeerTracker {
  public static final Logger LOG =
      LoggerFactory.getLogger(SlowPeerTracker.class);
  /**
   * Time duration after which a report is considered stale. This is
   * set to DFS_DATANODE_SLOW_PEERS_REPORT_INTERVAL_KEY * 3, i.e. reports
   * are maintained across at least two successive report intervals.
   */
  private final long reportValidityMs;
  /**
   * Timer object for querying the current time. Separated out for
   * unit testing.
   */
  private final Timer timer;
  /**
   * Number of nodes to include in JSON report. We will return nodes with
   * the highest number of votes from peers.
   */
  private static final int MAX_NODES_TO_REPORT = 5;
  /**
   * Information about peers that have reported a node as being slow.
   * Each outer map entry is a map of (DatanodeId) -> (timestamp),
   * mapping reporting nodes to the timestamp of the last report from
   * that node.
   *
   * DatanodeId could be the DataNodeId or its address. We
   * don't care as long as the caller uses it consistently.
   *
   * Stale reports are not evicted proactively and can potentially
   * hang around forever.
   */
  private final ConcurrentMap<String, ConcurrentMap<String, Long>>
      allReports;
  public SlowPeerTracker(Configuration conf, Timer timer) {
    this.timer = timer;
    this.allReports = new ConcurrentHashMap<>();
    this.reportValidityMs = conf.getTimeDuration(
        DFSConfigKeys.DFS_DATANODE_SLOW_PEERS_REPORT_INTERVAL_KEY,
        DFSConfigKeys.DFS_DATANODE_SLOW_PEERS_REPORT_INTERVAL_DEFAULT,
        TimeUnit.MILLISECONDS) * 3;
  }
  /**
   * Add a new report. DatanodeIds can be the DataNodeIds or addresses.
   * We don't care as long as the caller is consistent.
   *
   * @param slowNode DataNodeId of the peer suspected to be slow.
   * @param reportingNode DataNodeId of the node reporting on its peer.
   */
  public void addReport(String slowNode,
                        String reportingNode) {
    ConcurrentMap<String, Long> nodeEntries = allReports.get(slowNode);
    if (nodeEntries == null) {
      // putIfAbsent guards against multiple writers.
      allReports.putIfAbsent(slowNode, new ConcurrentHashMap<>());
      nodeEntries = allReports.get(slowNode);
    }
    // Replace the existing entry from this node, if any.
    nodeEntries.put(reportingNode, timer.monotonicNow());
  }
  /**
   * Retrieve the non-expired reports that mark a given DataNode
   * as slow. Stale reports are excluded.
   *
   * @param slowNode target node Id.
   * @return set of reports which implicate the target node as being slow.
   */
  public Set<String> getReportsForNode(String slowNode) {
    final ConcurrentMap<String, Long> nodeEntries =
        allReports.get(slowNode);
    if (nodeEntries == null || nodeEntries.isEmpty()) {
      return Collections.emptySet();
    }
    return filterNodeReports(nodeEntries, timer.monotonicNow());
  }
  /**
   * Retrieve all reports for all nodes. Stale reports are excluded.
   *
   * @return map from SlowNodeId -> (set of nodes that reported it as slow).
   */
  public Map<String, SortedSet<String>> getReportsForAllDataNodes() {
    if (allReports.isEmpty()) {
      return ImmutableMap.of();
    }
    final Map<String, SortedSet<String>> allNodesValidReports = new HashMap<>();
    final long now = timer.monotonicNow();
    for (Map.Entry<String, ConcurrentMap<String, Long>> entry :
        allReports.entrySet()) {
      SortedSet<String> validReports = filterNodeReports(entry.getValue(), now);
      if (!validReports.isEmpty()) {
        allNodesValidReports.put(entry.getKey(), validReports);
      }
    }
    return allNodesValidReports;
  }
  /**
   * Filter the given reports to return just the valid (non-stale) ones.
   *
   * @param reports map from reporting node to the timestamp of its last report.
   * @param now current monotonic time in milliseconds.
   * @return sorted set of reporting nodes whose reports are still valid.
   */
  private SortedSet<String> filterNodeReports(
      ConcurrentMap<String, Long> reports, long now) {
    final SortedSet<String> validReports = new TreeSet<>();
    for (Map.Entry<String, Long> entry : reports.entrySet()) {
      if (now - entry.getValue() < reportValidityMs) {
        validReports.add(entry.getKey());
      }
    }
    return validReports;
  }
  /**
   * Retrieve all valid reports as a JSON string.
   * @return serialized representation of valid reports. null if
   *         serialization failed.
   */
  public String getJson() {
    Collection<ReportForJson> validReports = getJsonReports(
        MAX_NODES_TO_REPORT);
    ObjectMapper objectMapper = new ObjectMapper();
    try {
      return objectMapper.writeValueAsString(validReports);
    } catch (JsonProcessingException e) {
      // Failed to serialize. Don't log the exception call stack.
      LOG.debug("Failed to serialize statistics" + e);
      return null;
    }
  }
  /**
   * This structure is a thin wrapper over reports to make Json
   * [de]serialization easy.
   */
  public static class ReportForJson {
    @JsonProperty("SlowNode")
    private final String slowNode;
    @JsonProperty("ReportingNodes")
    private final SortedSet<String> reportingNodes;
    public ReportForJson(
        @JsonProperty("SlowNode") String slowNode,
        @JsonProperty("ReportingNodes") SortedSet<String> reportingNodes) {
      this.slowNode = slowNode;
      this.reportingNodes = reportingNodes;
    }
    public String getSlowNode() {
      return slowNode;
    }
    public SortedSet<String> getReportingNodes() {
      return reportingNodes;
    }
  }
  /**
   * Retrieve reports in a structure for generating JSON, limiting the
   * output to the top numNodes nodes, i.e. the nodes with the most reports.
   * @param numNodes number of nodes to return. This is to limit the
   *                 size of the generated JSON.
   * @return up to numNodes reports for the most frequently reported nodes.
   */
  private Collection<ReportForJson> getJsonReports(int numNodes) {
    if (allReports.isEmpty()) {
      return Collections.emptyList();
    }
    final PriorityQueue<ReportForJson> topNReports =
        new PriorityQueue<>(allReports.size(),
            new Comparator<ReportForJson>() {
          @Override
          public int compare(ReportForJson o1, ReportForJson o2) {
            return Ints.compare(o1.reportingNodes.size(),
                o2.reportingNodes.size());
          }
        });
    final long now = timer.monotonicNow();
    for (Map.Entry<String, ConcurrentMap<String, Long>> entry :
        allReports.entrySet()) {
      SortedSet<String> validReports = filterNodeReports(
          entry.getValue(), now);
      if (!validReports.isEmpty()) {
        if (topNReports.size() < numNodes) {
          topNReports.add(new ReportForJson(entry.getKey(), validReports));
        } else if (topNReports.peek().getReportingNodes().size() <
            validReports.size()){
          // Remove the lowest element
          topNReports.poll();
          topNReports.add(new ReportForJson(entry.getKey(), validReports));
        }
      }
    }
    return topNReports;
  }
  @VisibleForTesting
  long getReportValidityMs() {
    return reportValidityMs;
  }
}
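// Hedged usage sketch (not part of the original source): how a heartbeat-processing
// component might feed this tracker and expose its JSON summary. The class and method
// names below (SlowPeerTrackerUsageSketch, onSlowPeerReport, slowPeersJson) are
// hypothetical and exist only for illustration.
class SlowPeerTrackerUsageSketch {
  private final SlowPeerTracker tracker =
      new SlowPeerTracker(new Configuration(), new Timer());

  /** Record that reportingNode flagged slowNode as slow in its latest heartbeat. */
  void onSlowPeerReport(String slowNode, String reportingNode) {
    tracker.addReport(slowNode, reportingNode);
  }

  /**
   * Expose the most-reported nodes as JSON, e.g. for a metrics endpoint. Based on the
   * ReportForJson annotations above, the output is an array of objects of the form
   * {"SlowNode": ..., "ReportingNodes": [...]}.
   */
  String slowPeersJson() {
    return tracker.getJson();
  }
}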
 
 | 
	apache_hadoop 
 | 
	2017-01-28 
 | 
	312b36d113d83640b92c62fdd91ede74bd04c00f 
 | 
					
	hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ContainerUpdates.java 
 | 68 
							 | 
	/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
import org.apache.hadoop.yarn.api.records.UpdateContainerRequest;
import java.util.ArrayList;
import java.util.List;
/**
 * Holder class that maintains the lists of container update requests.
 */
public class ContainerUpdates {
  final List<UpdateContainerRequest> increaseRequests = new ArrayList<>();
  final List<UpdateContainerRequest> decreaseRequests = new ArrayList<>();
  final List<UpdateContainerRequest> promotionRequests = new ArrayList<>();
  final List<UpdateContainerRequest> demotionRequests = new ArrayList<>();
  /**
   * Returns Container Increase Requests.
   * @return Container Increase Requests.
   */
  public List<UpdateContainerRequest> getIncreaseRequests() {
    return increaseRequests;
  }
  /**
   * Returns Container Decrease Requests.
   * @return Container Decrease Requests.
   */
  public List<UpdateContainerRequest> getDecreaseRequests() {
    return decreaseRequests;
  }
  /**
   * Returns Container Promotion Requests.
   * @return Container Promotion Requests.
   */
  public List<UpdateContainerRequest> getPromotionRequests() {
    return promotionRequests;
  }
  /**
   * Returns Container Demotion Requests.
   * @return Container Demotion Requests.
   */
  public List<UpdateContainerRequest> getDemotionRequests() {
    return demotionRequests;
  }
}
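// Hedged usage sketch (not part of the original source): a minimal illustration of how a
// caller might populate and read this holder. The helper class and method names are
// hypothetical; how a scheduler classifies update requests is not shown here.
class ContainerUpdatesUsageSketch {
  void recordIncrease(ContainerUpdates updates, UpdateContainerRequest request) {
    // The getters expose the live lists, so callers append to them directly.
    updates.getIncreaseRequests().add(request);
  }

  int pendingIncreases(ContainerUpdates updates) {
    return updates.getIncreaseRequests().size();
  }
}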
 
 | 
	apache_hadoop 
 | 
	2017-01-28 
 | 
	312b36d113d83640b92c62fdd91ede74bd04c00f 
 | 
					
	hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestContainerLocalizer.java 
 | 264 
							 | 
	/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements.  See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership.  The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License.  You may obtain a copy of the License at
*
*     http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer;
import static junit.framework.TestCase.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.mockito.Matchers.any;
import static org.mockito.Matchers.anyBoolean;
import static org.mockito.Matchers.anyInt;
import static org.mockito.Matchers.argThat;
import static org.mockito.Matchers.eq;
import static org.mockito.Matchers.isA;
import static org.mockito.Matchers.same;
import static org.mockito.Mockito.doAnswer;
import static org.mockito.Mockito.doNothing;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Random;
import java.util.concurrent.Callable;
import java.util.concurrent.CompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import com.google.common.base.Supplier;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.AbstractFileSystem;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.Shell;
import org.apache.hadoop.util.Shell.ShellCommandExecutor;
import org.apache.hadoop.yarn.api.records.LocalResource;
import org.apache.hadoop.yarn.api.records.LocalResourceType;
import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
import org.apache.hadoop.yarn.api.records.URL;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.server.nodemanager.api.LocalizationProtocol;
import org.apache.hadoop.yarn.server.nodemanager.api.ResourceLocalizationSpec;
import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalResourceStatus;
import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalizerAction;
import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalizerStatus;
import org.apache.hadoop.yarn.util.ConverterUtils;
import org.junit.Assert;
import org.junit.Test;
import org.mockito.ArgumentMatcher;
import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
public class TestContainerLocalizer {
  static final Log LOG = LogFactory.getLog(TestContainerLocalizer.class);
  static final Path basedir =
      new Path("target", TestContainerLocalizer.class.getName());
  static final FsPermission CACHE_DIR_PERM = new FsPermission((short)0710);
  static final String appUser = "yak";
  static final String appId = "app_RM_0";
  static final String containerId = "container_0";
  static final InetSocketAddress nmAddr =
      new InetSocketAddress("foobar", 8040);
  @Test
  public void testMain() throws Exception {
    ContainerLocalizerWrapper wrapper = new ContainerLocalizerWrapper();
    ContainerLocalizer localizer =
        wrapper.setupContainerLocalizerForTest();
    Random random = wrapper.random;
    List<Path> localDirs = wrapper.localDirs;
    Path tokenPath = wrapper.tokenPath;
    LocalizationProtocol nmProxy = wrapper.nmProxy;
    AbstractFileSystem spylfs = wrapper.spylfs;
    mockOutDownloads(localizer);
    // verify created cache
    List<Path> privCacheList = new ArrayList<Path>();
    List<Path> appCacheList = new ArrayList<Path>();
    for (Path p : localDirs) {
      Path base = new Path(new Path(p, ContainerLocalizer.USERCACHE), appUser);
      Path privcache = new Path(base, ContainerLocalizer.FILECACHE);
      privCacheList.add(privcache);
      Path appDir =
          new Path(base, new Path(ContainerLocalizer.APPCACHE, appId));
      Path appcache = new Path(appDir, ContainerLocalizer.FILECACHE);
      appCacheList.add(appcache);
    }
    // mock heartbeat responses from NM
    ResourceLocalizationSpec rsrcA =
        getMockRsrc(random, LocalResourceVisibility.PRIVATE,
          privCacheList.get(0));
    ResourceLocalizationSpec rsrcB =
        getMockRsrc(random, LocalResourceVisibility.PRIVATE,
          privCacheList.get(0));
    ResourceLocalizationSpec rsrcC =
        getMockRsrc(random, LocalResourceVisibility.APPLICATION,
          appCacheList.get(0));
    ResourceLocalizationSpec rsrcD =
        getMockRsrc(random, LocalResourceVisibility.PRIVATE,
          privCacheList.get(0));
    when(nmProxy.heartbeat(isA(LocalizerStatus.class)))
      .thenReturn(new MockLocalizerHeartbeatResponse(LocalizerAction.LIVE,
            Collections.singletonList(rsrcA)))
      .thenReturn(new MockLocalizerHeartbeatResponse(LocalizerAction.LIVE,
            Collections.singletonList(rsrcB)))
      .thenReturn(new MockLocalizerHeartbeatResponse(LocalizerAction.LIVE,
            Collections.singletonList(rsrcC)))
      .thenReturn(new MockLocalizerHeartbeatResponse(LocalizerAction.LIVE,
            Collections.singletonList(rsrcD)))
      .thenReturn(new MockLocalizerHeartbeatResponse(LocalizerAction.LIVE,
            Collections.<ResourceLocalizationSpec>emptyList()))
      .thenReturn(new MockLocalizerHeartbeatResponse(LocalizerAction.DIE,
            null));
    LocalResource tRsrcA = rsrcA.getResource();
    LocalResource tRsrcB = rsrcB.getResource();
    LocalResource tRsrcC = rsrcC.getResource();
    LocalResource tRsrcD = rsrcD.getResource();
    doReturn(
      new FakeDownload(rsrcA.getResource().getResource().getFile(), true))
      .when(localizer).download(isA(Path.class), eq(tRsrcA),
        isA(UserGroupInformation.class));
    doReturn(
      new FakeDownload(rsrcB.getResource().getResource().getFile(), true))
      .when(localizer).download(isA(Path.class), eq(tRsrcB),
        isA(UserGroupInformation.class));
    doReturn(
      new FakeDownload(rsrcC.getResource().getResource().getFile(), true))
      .when(localizer).download(isA(Path.class), eq(tRsrcC),
        isA(UserGroupInformation.class));
    doReturn(
      new FakeDownload(rsrcD.getResource().getResource().getFile(), true))
      .when(localizer).download(isA(Path.class), eq(tRsrcD),
        isA(UserGroupInformation.class));
    // run localization
    localizer.runLocalization(nmAddr);
    for (Path p : localDirs) {
      Path base = new Path(new Path(p, ContainerLocalizer.USERCACHE), appUser);
      Path privcache = new Path(base, ContainerLocalizer.FILECACHE);
      // $x/usercache/$user/filecache
      verify(spylfs).mkdir(eq(privcache), eq(CACHE_DIR_PERM), eq(false));
      Path appDir =
        new Path(base, new Path(ContainerLocalizer.APPCACHE, appId));
      // $x/usercache/$user/appcache/$appId/filecache
      Path appcache = new Path(appDir, ContainerLocalizer.FILECACHE);
      verify(spylfs).mkdir(eq(appcache), eq(CACHE_DIR_PERM), eq(false));
    }
    // verify tokens read at expected location
    verify(spylfs).open(tokenPath);
    // verify downloaded resources reported to NM
    verify(nmProxy).heartbeat(argThat(new HBMatches(rsrcA.getResource())));
    verify(nmProxy).heartbeat(argThat(new HBMatches(rsrcB.getResource())));
    verify(nmProxy).heartbeat(argThat(new HBMatches(rsrcC.getResource())));
    verify(nmProxy).heartbeat(argThat(new HBMatches(rsrcD.getResource())));
    // verify all HB use localizerID provided
    verify(nmProxy, never()).heartbeat(argThat(
        new ArgumentMatcher<LocalizerStatus>() {
          @Override
          public boolean matches(Object o) {
            LocalizerStatus status = (LocalizerStatus) o;
            return !containerId.equals(status.getLocalizerId());
          }
        }));
  }
  @Test(timeout = 15000)
  public void testMainFailure() throws Exception {
    ContainerLocalizerWrapper wrapper = new ContainerLocalizerWrapper();
    ContainerLocalizer localizer = wrapper.setupContainerLocalizerForTest();
    LocalizationProtocol nmProxy = wrapper.nmProxy;
    mockOutDownloads(localizer);
    // Assume the NM heartbeat fails, say because of absent tokens.
    when(nmProxy.heartbeat(isA(LocalizerStatus.class))).thenThrow(
        new YarnException("Sigh, no token!"));
    // run localization, it should fail
    try {
      localizer.runLocalization(nmAddr);
      Assert.fail("Localization succeeded unexpectedly!");
    } catch (IOException e) {
      Assert.assertTrue(e.getMessage().contains("Sigh, no token!"));
    }
  }
  @Test
  @SuppressWarnings("unchecked")
  public void testLocalizerTokenIsGettingRemoved() throws Exception {
    ContainerLocalizerWrapper wrapper = new ContainerLocalizerWrapper();
    ContainerLocalizer localizer = wrapper.setupContainerLocalizerForTest();
    Path tokenPath = wrapper.tokenPath;
    AbstractFileSystem spylfs = wrapper.spylfs;
    mockOutDownloads(localizer);
    doNothing().when(localizer).localizeFiles(any(LocalizationProtocol.class),
        any(CompletionService.class), any(UserGroupInformation.class));
    localizer.runLocalization(nmAddr);
    verify(spylfs, times(1)).delete(tokenPath, false);
  }
  @Test
  @SuppressWarnings("unchecked") // mocked generics
  public void testContainerLocalizerClosesFilesystems() throws Exception {
    // verify filesystems are closed when localizer doesn't fail
    ContainerLocalizerWrapper wrapper = new ContainerLocalizerWrapper();
    ContainerLocalizer localizer = wrapper.setupContainerLocalizerForTest();
    mockOutDownloads(localizer);
    doNothing().when(localizer).localizeFiles(any(LocalizationProtocol.class),
        any(CompletionService.class), any(UserGroupInformation.class));
    verify(localizer, never()).closeFileSystems(
        any(UserGroupInformation.class));
    localizer.runLocalization(nmAddr);
    verify(localizer).closeFileSystems(any(UserGroupInformation.class));
    // verify filesystems are closed when localizer fails
    localizer = wrapper.setupContainerLocalizerForTest();
    doThrow(new YarnRuntimeException("Forced Failure")).when(localizer).localizeFiles(
        any(LocalizationProtocol.class), any(CompletionService.class),
        any(UserGroupInformation.class));
    verify(localizer, never()).closeFileSystems(
        any(UserGroupInformation.class));
    try {
      localizer.runLocalization(nmAddr);
      Assert.fail("Localization succeeded unexpectedly!");
    } catch (IOException e) {
      verify(localizer).closeFileSystems(any(UserGroupInformation.class));
    }
  }
  @Test
  public void testMultipleLocalizers() throws Exception {
    FakeContainerLocalizerWrapper testA = new FakeContainerLocalizerWrapper();
    FakeContainerLocalizerWrapper testB = new FakeContainerLocalizerWrapper();
    FakeContainerLocalizer localizerA = testA.init();
    FakeContainerLocalizer localizerB = testB.init();
    // run localization
    Thread threadA = new Thread() {
      @Override
      public void run() {
        try {
          localizerA.runLocalization(nmAddr);
        } catch (Exception e) {
          LOG.warn(e);
        }
      }
    };
    Thread threadB = new Thread() {
      @Override
      public void run() {
        try {
          localizerB.runLocalization(nmAddr);
        } catch (Exception e) {
          LOG.warn(e);
        }
      }
    };
    ShellCommandExecutor shexcA = null;
    ShellCommandExecutor shexcB = null;
    try {
      threadA.start();
      threadB.start();
      GenericTestUtils.waitFor(new Supplier<Boolean>() {
        @Override
        public Boolean get() {
          FakeContainerLocalizer.FakeLongDownload downloader =
              localizerA.getDownloader();
          return downloader != null && downloader.getShexc() != null &&
              downloader.getShexc().getProcess() != null;
        }
      }, 10, 30000);
      GenericTestUtils.waitFor(new Supplier<Boolean>() {
        @Override
        public Boolean get() {
          FakeContainerLocalizer.FakeLongDownload downloader =
              localizerB.getDownloader();
          return downloader != null && downloader.getShexc() != null &&
              downloader.getShexc().getProcess() != null;
        }
      }, 10, 30000);
      shexcA = localizerA.getDownloader().getShexc();
      shexcB = localizerB.getDownloader().getShexc();
      assertTrue("Localizer A process not running, but should be",
          shexcA.getProcess().isAlive());
      assertTrue("Localizer B process not running, but should be",
          shexcB.getProcess().isAlive());
      // Stop the heartbeat from giving any more resources to download
      testA.heartbeatResponse++;
      testB.heartbeatResponse++;
      // Send DIE to localizerA. This should kill its subprocesses
      testA.heartbeatResponse++;
      threadA.join();
      shexcA.getProcess().waitFor(10000, TimeUnit.MILLISECONDS);
      assertFalse("Localizer A process is still running, but shouldn't be",
          shexcA.getProcess().isAlive());
      assertTrue("Localizer B process not running, but should be",
          shexcB.getProcess().isAlive());
    } finally {
      // Make sure everything gets cleaned up
      // Process A should already be dead
      shexcA.getProcess().destroy();
      shexcB.getProcess().destroy();
      shexcA.getProcess().waitFor(10000, TimeUnit.MILLISECONDS);
      shexcB.getProcess().waitFor(10000, TimeUnit.MILLISECONDS);
      threadA.join();
      // Send DIE to localizer B
      testB.heartbeatResponse++;
      threadB.join();
    }
  }
  private void mockOutDownloads(ContainerLocalizer localizer) {
    // return result instantly for deterministic test
    ExecutorService syncExec = mock(ExecutorService.class);
    CompletionService<Path> cs = mock(CompletionService.class);
    when(cs.submit(isA(Callable.class)))
      .thenAnswer(new Answer<Future<Path>>() {
          @Override
          public Future<Path> answer(InvocationOnMock invoc)
              throws Throwable {
            Future<Path> done = mock(Future.class);
            when(done.isDone()).thenReturn(true);
            FakeDownload d = (FakeDownload) invoc.getArguments()[0];
            when(done.get()).thenReturn(d.call());
            return done;
          }
        });
    doReturn(syncExec).when(localizer).createDownloadThreadPool();
    doReturn(cs).when(localizer).createCompletionService(syncExec);
  }
  static class HBMatches extends ArgumentMatcher<LocalizerStatus> {
    final LocalResource rsrc;
    HBMatches(LocalResource rsrc) {
      this.rsrc = rsrc;
    }
    @Override
    public boolean matches(Object o) {
      LocalizerStatus status = (LocalizerStatus) o;
      for (LocalResourceStatus localized : status.getResources()) {
        switch (localized.getStatus()) {
        case FETCH_SUCCESS:
          if (localized.getLocalPath().getFile().contains(
                rsrc.getResource().getFile())) {
            return true;
          }
          break;
        default:
          fail("Unexpected: " + localized.getStatus());
          break;
        }
      }
      return false;
    }
  }
  static class FakeDownload implements Callable<Path> {
    private final Path localPath;
    private final boolean succeed;
    FakeDownload(String absPath, boolean succeed) {
      this.localPath = new Path("file:///localcache" + absPath);
      this.succeed = succeed;
    }
    @Override
    public Path call() throws IOException {
      if (!succeed) {
        throw new IOException("FAIL " + localPath);
      }
      return localPath;
    }
  }
  class FakeContainerLocalizer extends ContainerLocalizer  {
    private FakeLongDownload downloader;
    FakeContainerLocalizer(FileContext lfs, String user, String appId,
        String localizerId, List<Path> localDirs,
        RecordFactory recordFactory) throws IOException {
      super(lfs, user, appId, localizerId, localDirs, recordFactory);
    }
    FakeLongDownload getDownloader() {
      return downloader;
    }
    @Override
    Callable<Path> download(Path path, LocalResource rsrc,
        UserGroupInformation ugi) throws IOException {
      downloader = new FakeLongDownload(Mockito.mock(FileContext.class), ugi,
          new Configuration(), path, rsrc);
      return downloader;
    }
    class FakeLongDownload extends ContainerLocalizer.FSDownloadWrapper {
      private final Path localPath;
      private Shell.ShellCommandExecutor shexc;
      FakeLongDownload(FileContext files, UserGroupInformation ugi,
          Configuration conf, Path destDirPath, LocalResource resource) {
        super(files, ugi, conf, destDirPath, resource);
        this.localPath = new Path("file:///localcache");
      }
      Shell.ShellCommandExecutor getShexc() {
        return shexc;
      }
      @Override
      public Path doDownloadCall() throws IOException {
        String sleepCommand = "sleep 30";
        String[] shellCmd = {"bash", "-c", sleepCommand};
        shexc = new Shell.ShellCommandExecutor(shellCmd);
        shexc.execute();
        return localPath;
      }
    }
  }
  class ContainerLocalizerWrapper {
    AbstractFileSystem spylfs;
    Random random;
    List<Path> localDirs;
    Path tokenPath;
    LocalizationProtocol nmProxy;
    @SuppressWarnings("unchecked") // mocked generics
    FakeContainerLocalizer setupContainerLocalizerForTest()
        throws Exception {
      FileContext fs = FileContext.getLocalFSFileContext();
      spylfs = spy(fs.getDefaultFileSystem());
      // don't actually create dirs
      doNothing().when(spylfs).mkdir(
          isA(Path.class), isA(FsPermission.class), anyBoolean());
      Configuration conf = new Configuration();
      FileContext lfs = FileContext.getFileContext(spylfs, conf);
      localDirs = new ArrayList<Path>();
      for (int i = 0; i < 4; ++i) {
        localDirs.add(lfs.makeQualified(new Path(basedir, i + "")));
      }
      RecordFactory mockRF = getMockLocalizerRecordFactory();
      FakeContainerLocalizer concreteLoc = new FakeContainerLocalizer(lfs,
          appUser, appId, containerId, localDirs, mockRF);
      FakeContainerLocalizer localizer = spy(concreteLoc);
      // return credential stream instead of opening local file
      random = new Random();
      long seed = random.nextLong();
      System.out.println("SEED: " + seed);
      random.setSeed(seed);
      DataInputBuffer appTokens = createFakeCredentials(random, 10);
      tokenPath =
        lfs.makeQualified(new Path(
              String.format(ContainerLocalizer.TOKEN_FILE_NAME_FMT,
                  containerId)));
      doReturn(new FSDataInputStream(new FakeFSDataInputStream(appTokens))
          ).when(spylfs).open(tokenPath);
      nmProxy = mock(LocalizationProtocol.class);
      doReturn(nmProxy).when(localizer).getProxy(nmAddr);
      doNothing().when(localizer).sleep(anyInt());
      return localizer;
    }
  }
  class FakeContainerLocalizerWrapper extends ContainerLocalizerWrapper{
    private int heartbeatResponse = 0;
    public FakeContainerLocalizer init() throws Exception {
      FileContext fs = FileContext.getLocalFSFileContext();
      FakeContainerLocalizer localizer = setupContainerLocalizerForTest();
      // verify created cache
      List<Path> privCacheList = new ArrayList<Path>();
      for (Path p : localDirs) {
        Path base = new Path(new Path(p, ContainerLocalizer.USERCACHE),
            appUser);
        Path privcache = new Path(base, ContainerLocalizer.FILECACHE);
        privCacheList.add(privcache);
      }
      ResourceLocalizationSpec rsrc = getMockRsrc(random,
          LocalResourceVisibility.PRIVATE, privCacheList.get(0));
      // mock heartbeat responses from NM
      doAnswer(new Answer<MockLocalizerHeartbeatResponse>() {
        @Override
        public MockLocalizerHeartbeatResponse answer(
            InvocationOnMock invocationOnMock) throws Throwable {
          if(heartbeatResponse == 0) {
            return new MockLocalizerHeartbeatResponse(LocalizerAction.LIVE,
                Collections.singletonList(rsrc));
          } else if (heartbeatResponse < 2) {
            return new MockLocalizerHeartbeatResponse(LocalizerAction.LIVE,
                Collections.<ResourceLocalizationSpec>emptyList());
          } else {
            return new MockLocalizerHeartbeatResponse(LocalizerAction.DIE,
                null);
          }
        }
      }).when(nmProxy).heartbeat(isA(LocalizerStatus.class));
      return localizer;
    }
  }
  static RecordFactory getMockLocalizerRecordFactory() {
    RecordFactory mockRF = mock(RecordFactory.class);
    when(mockRF.newRecordInstance(same(LocalResourceStatus.class)))
      .thenAnswer(new Answer<LocalResourceStatus>() {
          @Override
          public LocalResourceStatus answer(InvocationOnMock invoc)
              throws Throwable {
            return new MockLocalResourceStatus();
          }
        });
    when(mockRF.newRecordInstance(same(LocalizerStatus.class)))
      .thenAnswer(new Answer<LocalizerStatus>() {
          @Override
          public LocalizerStatus answer(InvocationOnMock invoc)
              throws Throwable {
            return new MockLocalizerStatus();
          }
        });
    return mockRF;
  }
  static ResourceLocalizationSpec getMockRsrc(Random r,
      LocalResourceVisibility vis, Path p) {
    ResourceLocalizationSpec resourceLocalizationSpec =
      mock(ResourceLocalizationSpec.class);
    LocalResource rsrc = mock(LocalResource.class);
    String name = Long.toHexString(r.nextLong());
    URL uri = mock(org.apache.hadoop.yarn.api.records.URL.class);
    when(uri.getScheme()).thenReturn("file");
    when(uri.getHost()).thenReturn(null);
    when(uri.getFile()).thenReturn("/local/" + vis + "/" + name);
    when(rsrc.getResource()).thenReturn(uri);
    when(rsrc.getSize()).thenReturn(r.nextInt(1024) + 1024L);
    when(rsrc.getTimestamp()).thenReturn(r.nextInt(1024) + 2048L);
    when(rsrc.getType()).thenReturn(LocalResourceType.FILE);
    when(rsrc.getVisibility()).thenReturn(vis);
    when(resourceLocalizationSpec.getResource()).thenReturn(rsrc);
    when(resourceLocalizationSpec.getDestinationDirectory()).
      thenReturn(URL.fromPath(p));
    return resourceLocalizationSpec;
  }
  @SuppressWarnings({ "rawtypes", "unchecked" })
  static DataInputBuffer createFakeCredentials(Random r, int nTok)
      throws IOException {
    Credentials creds = new Credentials();
    byte[] password = new byte[20];
    Text kind = new Text();
    Text service = new Text();
    Text alias = new Text();
    for (int i = 0; i < nTok; ++i) {
      byte[] identifier = ("idef" + i).getBytes();
      r.nextBytes(password);
      kind.set("kind" + i);
      service.set("service" + i);
      alias.set("token" + i);
      Token token = new Token(identifier, password, kind, service);
      creds.addToken(alias, token);
    }
    DataOutputBuffer buf = new DataOutputBuffer();
    creds.writeTokenStorageToStream(buf);
    DataInputBuffer ret = new DataInputBuffer();
    ret.reset(buf.getData(), 0, buf.getLength());
    return ret;
  }
}
 
 | 
	apache_hadoop 
 | 
	2017-01-28 
 | 
	312b36d113d83640b92c62fdd91ede74bd04c00f 
 | 
					
	hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ConfigurationWithLogging.java 
 | 113 
							 | 
	/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.conf;
import org.apache.hadoop.classification.InterfaceAudience;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * Logs access to {@link Configuration}.
 * Sensitive data will be redacted.
 */
@InterfaceAudience.Private
public class ConfigurationWithLogging extends Configuration {
  private static final Logger LOG =
      LoggerFactory.getLogger(ConfigurationWithLogging.class);
  private final Logger log;
  private final ConfigRedactor redactor;
  public ConfigurationWithLogging(Configuration conf) {
    super(conf);
    log = LOG;
    redactor = new ConfigRedactor(conf);
  }
  /**
   * @see Configuration#get(String).
   */
  @Override
  public String get(String name) {
    String value = super.get(name);
    log.info("Got {} = '{}'", name, redactor.redact(name, value));
    return value;
  }
  /**
   * @see Configuration#get(String, String).
   */
  @Override
  public String get(String name, String defaultValue) {
    String value = super.get(name, defaultValue);
    log.info("Got {} = '{}' (default '{}')", name,
        redactor.redact(name, value), redactor.redact(name, defaultValue));
    return value;
  }
  /**
   * @see Configuration#getBoolean(String, boolean).
   */
  @Override
  public boolean getBoolean(String name, boolean defaultValue) {
    boolean value = super.getBoolean(name, defaultValue);
    log.info("Got {} = '{}' (default '{}')", name, value, defaultValue);
    return value;
  }
  /**
   * @see Configuration#getFloat(String, float).
   */
  @Override
  public float getFloat(String name, float defaultValue) {
    float value = super.getFloat(name, defaultValue);
    log.info("Got {} = '{}' (default '{}')", name, value, defaultValue);
    return value;
  }
  /**
   * @see Configuration#getInt(String, int).
   */
  @Override
  public int getInt(String name, int defaultValue) {
    int value = super.getInt(name, defaultValue);
    log.info("Got {} = '{}' (default '{}')", name, value, defaultValue);
    return value;
  }
  /**
   * @see Configuration#getLong(String, long).
   */
  @Override
  public long getLong(String name, long defaultValue) {
    long value = super.getLong(name, defaultValue);
    log.info("Got {} = '{}' (default '{}')", name, value, defaultValue);
    return value;
  }
  /**
   * @see Configuration#set(String, String, String).
   */
  @Override
  public void set(String name, String value, String source) {
    log.info("Set {} to '{}'{}", name, redactor.redact(name, value),
        source == null ? "" : " from " + source);
    super.set(name, value, source);
  }
}
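// Hedged usage sketch (not part of the original source): wrapping an existing
// Configuration so that subsequent reads are logged, with sensitive values redacted by
// ConfigRedactor. The helper name and the key/default used below are illustrative only.
class ConfigurationWithLoggingUsageSketch {
  static String readWithLogging(Configuration conf) {
    Configuration logged = new ConfigurationWithLogging(conf);
    // This get() call is logged at INFO with the (redacted) value and default.
    return logged.get("hadoop.tmp.dir", "/tmp/hadoop");
  }
}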
 
 | 
	apache_hadoop 
 | 
	2017-01-28 
 | 
	312b36d113d83640b92c62fdd91ede74bd04c00f 
 | 
					
	hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestAuthenticationWithProxyUserFilter.java 
 | 78 
							 | 
	/**
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with this
 * work for additional information regarding copyright ownership. The ASF
 * licenses this file to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 * License for the specific language governing permissions and limitations under
 * the License.
 */
package org.apache.hadoop.security;
import junit.framework.TestCase;
import org.apache.hadoop.http.HttpServer2;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.http.FilterContainer;
import org.mockito.Mockito;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;
import java.util.Map;
/**
 * Tests {@link AuthenticationWithProxyUserFilter} to verify the
 * configuration of this filter.
 */
public class TestAuthenticationWithProxyUserFilter extends TestCase {
  @SuppressWarnings("unchecked")
  public void testConfiguration() throws Exception {
    Configuration conf = new Configuration();
    conf.set("hadoop.http.authentication.foo", "bar");
    conf.set(HttpServer2.BIND_ADDRESS, "barhost");
    FilterContainer container = Mockito.mock(FilterContainer.class);
    Mockito.doAnswer(
      new Answer() {
        @Override
        public Object answer(InvocationOnMock invocationOnMock)
          throws Throwable {
          Object[] args = invocationOnMock.getArguments();
          assertEquals("authentication", args[0]);
          assertEquals(
              AuthenticationWithProxyUserFilter.class.getName(), args[1]);
          Map<String, String> conf = (Map<String, String>) args[2];
          assertEquals("/", conf.get("cookie.path"));
          assertEquals("simple", conf.get("type"));
          assertEquals("36000", conf.get("token.validity"));
          assertNull(conf.get("cookie.domain"));
          assertEquals("true", conf.get("simple.anonymous.allowed"));
          assertEquals("HTTP/barhost@LOCALHOST",
                       conf.get("kerberos.principal"));
          assertEquals(System.getProperty("user.home") +
                       "/hadoop.keytab", conf.get("kerberos.keytab"));
          assertEquals("bar", conf.get("foo"));
          return null;
        }
      }
    ).when(container).addFilter(Mockito.<String>anyObject(),
                                Mockito.<String>anyObject(),
                                Mockito.<Map<String, String>>anyObject());
    new AuthenticationFilterInitializer().initFilter(container, conf);
  }
}
 
 | 
	apache_hadoop 
 | 
	2017-01-28 
 | 
	312b36d113d83640b92c62fdd91ede74bd04c00f 
 | 
					
	hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/QueueACLsManager.java 
 | 66 
							 | 
	/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements.  See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership.  The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License.  You may obtain a copy of the License at
*
*     http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.security;
import com.google.common.annotations.VisibleForTesting;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.api.records.QueueACL;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.security.AccessRequest;
import org.apache.hadoop.yarn.security.YarnAuthorizationProvider;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CSQueue;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSQueue;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler;
import java.util.List;
public class QueueACLsManager {
  private static final Log LOG = LogFactory.getLog(QueueACLsManager.class);
  private ResourceScheduler scheduler;
  private boolean isACLsEnable;
  private YarnAuthorizationProvider authorizer;
  @VisibleForTesting
  public QueueACLsManager() {
    this(null, new Configuration());
  }
  public QueueACLsManager(ResourceScheduler scheduler, Configuration conf) {
    this.scheduler = scheduler;
    this.isACLsEnable = conf.getBoolean(YarnConfiguration.YARN_ACL_ENABLE,
        YarnConfiguration.DEFAULT_YARN_ACL_ENABLE);
    this.authorizer = YarnAuthorizationProvider.getInstance(conf);
  }
  public boolean checkAccess(UserGroupInformation callerUGI, QueueACL acl,
      RMApp app, String remoteAddress, List<String> forwardedAddresses) {
    if (!isACLsEnable) {
      return true;
    }
    if (scheduler instanceof CapacityScheduler) {
      CSQueue queue = ((CapacityScheduler) scheduler).getQueue(app.getQueue());
      if (queue == null) {
        // The application exists but the associated queue does not exist.
        // This may be due to a queue that is not defined when the RM restarts.
        // At this point we choose to log the fact and allow users to access
        // and view the apps in a removed queue. This should only happen on
        // application recovery.
        LOG.error("Queue " + app.getQueue() + " does not exist for " + app
            .getApplicationId());
        return true;
      }
      return authorizer.checkPermission(
          new AccessRequest(queue.getPrivilegedEntity(), callerUGI,
              SchedulerUtils.toAccessType(acl),
              app.getApplicationId().toString(), app.getName(),
              remoteAddress, forwardedAddresses));
    } else {
      return scheduler.checkAccess(callerUGI, acl, app.getQueue());
    }
  }
  /**
   * Check access to a targetQueue in the case of a move of an application.
   * The application does not yet reference the destination queue, since it
   * has not been moved, so the target queue is passed in separately.
   *
   * @param callerUGI the caller UGI
   * @param acl the acl for the queue to check
   * @param app the application to move
   * @param remoteAddress server IP address
   * @param forwardedAddresses forwarded addresses
   * @param targetQueue the name of the queue to move the application to
   * @return true if access is allowed and the queue exists,
   *         false in all other cases (including a non-existent target queue)
   */
  public boolean checkAccess(UserGroupInformation callerUGI, QueueACL acl,
      RMApp app, String remoteAddress, List<String> forwardedAddresses,
      String targetQueue) {
    if (!isACLsEnable) {
      return true;
    }
    // Based on the discussion in YARN-5554 detail on why there are two
    // versions:
    // The access check inside these calls is currently scheduler dependent.
    // This is due to the extra parameters needed for the CS case which are not
    // in the version defined in the YarnScheduler interface. The second
    // version is added for the moving the application case. The check has
    // extra logging to distinguish between the queue not existing in the
    // application move request case and the real access denied case.
    if (scheduler instanceof CapacityScheduler) {
      CSQueue queue = ((CapacityScheduler) scheduler).getQueue(targetQueue);
      if (queue == null) {
        LOG.warn("Target queue " + targetQueue
            + " does not exist while trying to move "
            + app.getApplicationId());
        return false;
      }
      return authorizer.checkPermission(
          new AccessRequest(queue.getPrivilegedEntity(), callerUGI,
              SchedulerUtils.toAccessType(acl),
              app.getApplicationId().toString(), app.getName(),
              remoteAddress, forwardedAddresses));
    } else if (scheduler instanceof FairScheduler) {
      FSQueue queue = ((FairScheduler) scheduler).getQueueManager().
          getQueue(targetQueue);
      if (queue == null) {
        LOG.warn("Target queue " + targetQueue
            + " does not exist while trying to move "
            + app.getApplicationId());
        return false;
      }
      return scheduler.checkAccess(callerUGI, acl, targetQueue);
    } else {
      // For any other scheduler, just delegate the access check.
      return scheduler.checkAccess(callerUGI, acl, targetQueue);
    }
  }
}
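// Hedged usage sketch (not part of the original source): how an RM client-facing service
// might consult this manager before exposing an application to a caller. The class and
// method names are hypothetical; the ACL chosen below is just one possible check.
class QueueACLsManagerUsageSketch {
  boolean canAdministerApp(QueueACLsManager aclsManager, UserGroupInformation caller,
      RMApp app, String remoteAddress, List<String> forwardedAddresses) {
    return aclsManager.checkAccess(caller, QueueACL.ADMINISTER_QUEUE, app,
        remoteAddress, forwardedAddresses);
  }
}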
 
 | 
	apache_hadoop 
 | 
	2017-01-28 
 | 
	312b36d113d83640b92c62fdd91ede74bd04c00f 
 | 
					
	hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.java 
 | 305 
							 | 
	/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.io.compress.zstd;
import com.google.common.annotations.VisibleForTesting;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.io.compress.Compressor;
import org.apache.hadoop.io.compress.ZStandardCodec;
import org.apache.hadoop.util.NativeCodeLoader;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.nio.ByteBuffer;
/**
 * A {@link Compressor} based on the zStandard compression algorithm.
 * https://github.com/facebook/zstd
 */
public class ZStandardCompressor implements Compressor {
  private static final Logger LOG =
      LoggerFactory.getLogger(ZStandardCompressor.class);
  private long stream;
  private int level;
  private int directBufferSize;
  private byte[] userBuf = null;
  private int userBufOff = 0, userBufLen = 0;
  private ByteBuffer uncompressedDirectBuf = null;
  private int uncompressedDirectBufOff = 0, uncompressedDirectBufLen = 0;
  private boolean keepUncompressedBuf = false;
  private ByteBuffer compressedDirectBuf = null;
  private boolean finish, finished;
  private long bytesRead = 0;
  private long bytesWritten = 0;
  private static boolean nativeZStandardLoaded = false;
  static {
    if (NativeCodeLoader.isNativeCodeLoaded()) {
      try {
        // Initialize the native library
        initIDs();
        nativeZStandardLoaded = true;
      } catch (Throwable t) {
        LOG.warn("Error loading zstandard native libraries: " + t);
      }
    }
  }
  public static boolean isNativeCodeLoaded() {
    return nativeZStandardLoaded;
  }
  public static int getRecommendedBufferSize() {
    return getStreamSize();
  }
  @VisibleForTesting
  ZStandardCompressor() {
    this(CommonConfigurationKeys.IO_COMPRESSION_CODEC_ZSTD_LEVEL_DEFAULT,
        CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT);
  }
  /**
   * Creates a new compressor with the given compression level, using the
   * given buffer size for both the input and output buffers.
   * Compressed data will be generated in ZStandard format.
   */
  public ZStandardCompressor(int level, int bufferSize) {
    this(level, bufferSize, bufferSize);
  }
  @VisibleForTesting
  ZStandardCompressor(int level, int inputBufferSize, int outputBufferSize) {
    this.level = level;
    stream = create();
    this.directBufferSize = outputBufferSize;
    uncompressedDirectBuf = ByteBuffer.allocateDirect(inputBufferSize);
    compressedDirectBuf = ByteBuffer.allocateDirect(outputBufferSize);
    compressedDirectBuf.position(outputBufferSize);
    reset();
  }
  /**
   * Prepare the compressor to be used in a new stream with settings defined in
   * the given Configuration. It will reset the compressor's compression level
   * and compression strategy.
   *
   * @param conf Configuration storing new settings
   */
  @Override
  public void reinit(Configuration conf) {
    if (conf == null) {
      return;
    }
    level = ZStandardCodec.getCompressionLevel(conf);
    reset();
    LOG.debug("Reinit compressor with new compression configuration");
  }
  @Override
  public void setInput(byte[] b, int off, int len) {
    if (b == null) {
      throw new NullPointerException();
    }
    if (off < 0 || len < 0 || off > b.length - len) {
      throw new ArrayIndexOutOfBoundsException();
    }
    this.userBuf = b;
    this.userBufOff = off;
    this.userBufLen = len;
    uncompressedDirectBufOff = 0;
    setInputFromSavedData();
    compressedDirectBuf.limit(directBufferSize);
    compressedDirectBuf.position(directBufferSize);
  }
  // copy enough data from userBuf to uncompressedDirectBuf
  private void setInputFromSavedData() {
    int len = Math.min(userBufLen, uncompressedDirectBuf.remaining());
    uncompressedDirectBuf.put(userBuf, userBufOff, len);
    userBufLen -= len;
    userBufOff += len;
    uncompressedDirectBufLen = uncompressedDirectBuf.position();
  }
  @Override
  public void setDictionary(byte[] b, int off, int len) {
    throw new UnsupportedOperationException(
        "Dictionary support is not enabled");
  }
  @Override
  public boolean needsInput() {
    // Consume remaining compressed data?
    if (compressedDirectBuf.remaining() > 0) {
      return false;
    }
    // have we consumed all input
    if (keepUncompressedBuf && uncompressedDirectBufLen > 0) {
      return false;
    }
    if (uncompressedDirectBuf.remaining() > 0) {
      // Check if we have consumed all user-input
      if (userBufLen <= 0) {
        return true;
      } else {
        // copy enough data from userBuf to uncompressedDirectBuf
        setInputFromSavedData();
        // uncompressedDirectBuf is not full
        return uncompressedDirectBuf.remaining() > 0;
      }
    }
    return false;
  }
  @Override
  public void finish() {
    finish = true;
  }
  @Override
  public boolean finished() {
    // Check if 'zstd' says it's 'finished' and all compressed
    // data has been consumed
    return (finished && compressedDirectBuf.remaining() == 0);
  }
  @Override
  public int compress(byte[] b, int off, int len) throws IOException {
    checkStream();
    if (b == null) {
      throw new NullPointerException();
    }
    if (off < 0 || len < 0 || off > b.length - len) {
      throw new ArrayIndexOutOfBoundsException();
    }
    // Check if there is compressed data
    int n = compressedDirectBuf.remaining();
    if (n > 0) {
      n = Math.min(n, len);
      compressedDirectBuf.get(b, off, n);
      return n;
    }
    // Re-initialize the output direct buffer
    compressedDirectBuf.rewind();
    compressedDirectBuf.limit(directBufferSize);
    // Compress data
    n = deflateBytesDirect(
        uncompressedDirectBuf,
        uncompressedDirectBufOff,
        uncompressedDirectBufLen,
        compressedDirectBuf,
        directBufferSize
    );
    compressedDirectBuf.limit(n);
    // Check if we have consumed all input buffer
    if (uncompressedDirectBufLen <= 0) {
      // consumed all input buffer
      keepUncompressedBuf = false;
      uncompressedDirectBuf.clear();
      uncompressedDirectBufOff = 0;
      uncompressedDirectBufLen = 0;
    } else {
      //  did not consume all input buffer
      keepUncompressedBuf = true;
    }
    // Get at most 'len' bytes
    n = Math.min(n, len);
    compressedDirectBuf.get(b, off, n);
    return n;
  }
  /**
   * Returns the total number of compressed bytes output so far.
   *
   * @return the total (non-negative) number of compressed bytes output so far
   */
  @Override
  public long getBytesWritten() {
    checkStream();
    return bytesWritten;
  }
  /**
   * <p>Returns the total number of uncompressed bytes input so far.</p>
   *
   * @return the total (non-negative) number of uncompressed bytes input so far
   */
  @Override
  public long getBytesRead() {
    checkStream();
    return bytesRead;
  }
  @Override
  public void reset() {
    checkStream();
    init(level, stream);
    finish = false;
    finished = false;
    bytesRead = 0;
    bytesWritten = 0;
    uncompressedDirectBuf.rewind();
    uncompressedDirectBufOff = 0;
    uncompressedDirectBufLen = 0;
    keepUncompressedBuf = false;
    compressedDirectBuf.limit(directBufferSize);
    compressedDirectBuf.position(directBufferSize);
    userBufOff = 0;
    userBufLen = 0;
  }
  @Override
  public void end() {
    if (stream != 0) {
      end(stream);
      stream = 0;
    }
  }
  private void checkStream() {
    if (stream == 0) {
      throw new NullPointerException();
    }
  }
  private static native long create();
  private static native void init(int level, long stream);
  private native int deflateBytesDirect(ByteBuffer src, int srcOffset,
      int srcLen, ByteBuffer dst, int dstLen);
  private static native int getStreamSize();
  private static native void end(long strm);
  private static native void initIDs();
  public static native String getLibraryName();
}
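// Hedged usage sketch (not part of the original source): driving this class through the
// generic Compressor contract (setInput / finish / compress loop). Production code would
// normally go through ZStandardCodec's streams instead; the level and buffer sizes below
// are illustrative, and this only works when the native zstd library is loaded.
class ZStandardCompressorUsageSketch {
  static int compressAll(byte[] input, byte[] output) throws IOException {
    if (!ZStandardCompressor.isNativeCodeLoaded()) {
      throw new IOException("native zstd library is not available");
    }
    Compressor compressor = new ZStandardCompressor(3, 64 * 1024);
    compressor.setInput(input, 0, input.length);
    compressor.finish();
    int total = 0;
    while (!compressor.finished() && total < output.length) {
      // Drain compressed bytes into the output array until the stream is finished.
      total += compressor.compress(output, total, output.length - total);
    }
    compressor.end();
    return total;
  }
}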
 
 | 
	apache_hadoop 
 | 
	2017-01-28 
 | 
	312b36d113d83640b92c62fdd91ede74bd04c00f 
 | 
					
	hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/zstd/ZStandardCompressor.c 
 | 259 
							 | 
	/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "org_apache_hadoop_io_compress_zstd.h"
#if defined HADOOP_ZSTD_LIBRARY
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#ifdef UNIX
#include <dlfcn.h>
#include "config.h"
#endif
#include "org_apache_hadoop_io_compress_zstd_ZStandardCompressor.h"
static jfieldID ZStandardCompressor_stream;
static jfieldID ZStandardCompressor_uncompressedDirectBufOff;
static jfieldID ZStandardCompressor_uncompressedDirectBufLen;
static jfieldID ZStandardCompressor_directBufferSize;
static jfieldID ZStandardCompressor_finish;
static jfieldID ZStandardCompressor_finished;
static jfieldID ZStandardCompressor_bytesWritten;
static jfieldID ZStandardCompressor_bytesRead;
#ifdef UNIX
static size_t (*dlsym_ZSTD_CStreamInSize)(void);
static size_t (*dlsym_ZSTD_CStreamOutSize)(void);
static ZSTD_CStream* (*dlsym_ZSTD_createCStream)(void);
static size_t (*dlsym_ZSTD_initCStream)(ZSTD_CStream*, int);
static size_t (*dlsym_ZSTD_freeCStream)(ZSTD_CStream*);
static size_t (*dlsym_ZSTD_compressStream)(ZSTD_CStream*, ZSTD_outBuffer*, ZSTD_inBuffer*);
static size_t (*dlsym_ZSTD_endStream)(ZSTD_CStream*, ZSTD_outBuffer*);
static size_t (*dlsym_ZSTD_flushStream)(ZSTD_CStream*, ZSTD_outBuffer*);
static unsigned (*dlsym_ZSTD_isError)(size_t);
static const char * (*dlsym_ZSTD_getErrorName)(size_t);
#endif
#ifdef WINDOWS
typedef size_t (__cdecl *__dlsym_ZSTD_CStreamInSize)(void);
typedef size_t (__cdecl *__dlsym_ZSTD_CStreamOutSize)(void);
typedef ZSTD_CStream* (__cdecl *__dlsym_ZSTD_createCStream)(void);
typedef size_t (__cdecl *__dlsym_ZSTD_initCStream)(ZSTD_CStream*, int);
typedef size_t (__cdecl *__dlsym_ZSTD_freeCStream)(ZSTD_CStream*);
typedef size_t (__cdecl *__dlsym_ZSTD_compressStream)(ZSTD_CStream*, ZSTD_outBuffer*, ZSTD_inBuffer*);
typedef size_t (__cdecl *__dlsym_ZSTD_endStream)(ZSTD_CStream*, ZSTD_outBuffer*);
typedef size_t (__cdecl *__dlsym_ZSTD_flushStream)(ZSTD_CStream*, ZSTD_outBuffer*);
typedef unsigned (__cdecl *__dlsym_ZSTD_isError)(size_t);
typedef const char * (__cdecl *__dlsym_ZSTD_getErrorName)(size_t);
static __dlsym_ZSTD_CStreamInSize dlsym_ZSTD_CStreamInSize;
static __dlsym_ZSTD_CStreamOutSize dlsym_ZSTD_CStreamOutSize;
static __dlsym_ZSTD_createCStream dlsym_ZSTD_createCStream;
static __dlsym_ZSTD_initCStream dlsym_ZSTD_initCStream;
static __dlsym_ZSTD_freeCStream dlsym_ZSTD_freeCStream;
static __dlsym_ZSTD_compressStream dlsym_ZSTD_compressStream;
static __dlsym_ZSTD_endStream dlsym_ZSTD_endStream;
static __dlsym_ZSTD_flushStream dlsym_ZSTD_flushStream;
static __dlsym_ZSTD_isError dlsym_ZSTD_isError;
static __dlsym_ZSTD_getErrorName dlsym_ZSTD_getErrorName;
#endif
// Load the libzstd library from disk
JNIEXPORT void JNICALL Java_org_apache_hadoop_io_compress_zstd_ZStandardCompressor_initIDs (JNIEnv *env, jclass clazz) {
#ifdef UNIX
    // Load libzstd.so
    void *libzstd = dlopen(HADOOP_ZSTD_LIBRARY, RTLD_LAZY | RTLD_GLOBAL);
    if (!libzstd) {
        char* msg = (char*)malloc(10000);
        snprintf(msg, 10000, "%s (%s)!", "Cannot load " HADOOP_ZSTD_LIBRARY, dlerror());
        THROW(env, "java/lang/InternalError", msg);
        return;
    }
#endif
#ifdef WINDOWS
    HMODULE libzstd = LoadLibrary(HADOOP_ZSTD_LIBRARY);
    if (!libzstd) {
        THROW(env, "java/lang/UnsatisfiedLinkError", "Cannot load zstd.dll");
        return;
    }
#endif
#ifdef UNIX
    // load dynamic symbols
    dlerror();
    LOAD_DYNAMIC_SYMBOL(dlsym_ZSTD_CStreamInSize, env, libzstd, "ZSTD_CStreamInSize");
    LOAD_DYNAMIC_SYMBOL(dlsym_ZSTD_CStreamOutSize, env, libzstd, "ZSTD_CStreamOutSize");
    LOAD_DYNAMIC_SYMBOL(dlsym_ZSTD_createCStream, env, libzstd, "ZSTD_createCStream");
    LOAD_DYNAMIC_SYMBOL(dlsym_ZSTD_initCStream, env, libzstd, "ZSTD_initCStream");
    LOAD_DYNAMIC_SYMBOL(dlsym_ZSTD_freeCStream, env, libzstd, "ZSTD_freeCStream");
    LOAD_DYNAMIC_SYMBOL(dlsym_ZSTD_compressStream, env, libzstd, "ZSTD_compressStream");
    LOAD_DYNAMIC_SYMBOL(dlsym_ZSTD_endStream, env, libzstd, "ZSTD_endStream");
    LOAD_DYNAMIC_SYMBOL(dlsym_ZSTD_flushStream, env, libzstd, "ZSTD_flushStream");
    LOAD_DYNAMIC_SYMBOL(dlsym_ZSTD_isError, env, libzstd, "ZSTD_isError");
    LOAD_DYNAMIC_SYMBOL(dlsym_ZSTD_getErrorName, env, libzstd, "ZSTD_getErrorName");
#endif
#ifdef WINDOWS
    LOAD_DYNAMIC_SYMBOL(__dlsym_ZSTD_CStreamInSize, dlsym_ZSTD_CStreamInSize, env, libzstd, "ZSTD_CStreamInSize");
    LOAD_DYNAMIC_SYMBOL(__dlsym_ZSTD_CStreamOutSize, dlsym_ZSTD_CStreamOutSize, env, libzstd, "ZSTD_CStreamOutSize");
    LOAD_DYNAMIC_SYMBOL(__dlsym_ZSTD_createCStream, dlsym_ZSTD_createCStream, env, libzstd, "ZSTD_createCStream");
    LOAD_DYNAMIC_SYMBOL(__dlsym_ZSTD_initCStream, dlsym_ZSTD_initCStream, env, libzstd, "ZSTD_initCStream");
    LOAD_DYNAMIC_SYMBOL(__dlsym_ZSTD_freeCStream, dlsym_ZSTD_freeCStream, env, libzstd, "ZSTD_freeCStream");
    LOAD_DYNAMIC_SYMBOL(__dlsym_ZSTD_compressStream, dlsym_ZSTD_compressStream, env, libzstd, "ZSTD_compressStream");
    LOAD_DYNAMIC_SYMBOL(__dlsym_ZSTD_endStream, dlsym_ZSTD_endStream, env, libzstd, "ZSTD_endStream");
    LOAD_DYNAMIC_SYMBOL(__dlsym_ZSTD_flushStream, dlsym_ZSTD_flushStream, env, libzstd, "ZSTD_flushStream");
    LOAD_DYNAMIC_SYMBOL(__dlsym_ZSTD_isError, dlsym_ZSTD_isError, env, libzstd, "ZSTD_isError");
    LOAD_DYNAMIC_SYMBOL(__dlsym_ZSTD_getErrorName, dlsym_ZSTD_getErrorName, env, libzstd, "ZSTD_getErrorName");
#endif
    // load fields
    ZStandardCompressor_stream = (*env)->GetFieldID(env, clazz, "stream", "J");
    ZStandardCompressor_finish = (*env)->GetFieldID(env, clazz, "finish", "Z");
    ZStandardCompressor_finished = (*env)->GetFieldID(env, clazz, "finished", "Z");
    ZStandardCompressor_uncompressedDirectBufOff = (*env)->GetFieldID(env, clazz, "uncompressedDirectBufOff", "I");
    ZStandardCompressor_uncompressedDirectBufLen = (*env)->GetFieldID(env, clazz, "uncompressedDirectBufLen", "I");
    ZStandardCompressor_directBufferSize = (*env)->GetFieldID(env, clazz, "directBufferSize", "I");
    ZStandardCompressor_bytesRead = (*env)->GetFieldID(env, clazz, "bytesRead", "J");
    ZStandardCompressor_bytesWritten = (*env)->GetFieldID(env, clazz, "bytesWritten", "J");
}
// Create the compression stream
JNIEXPORT jlong JNICALL Java_org_apache_hadoop_io_compress_zstd_ZStandardCompressor_create (JNIEnv *env, jobject this) {
    ZSTD_CStream* const stream =  dlsym_ZSTD_createCStream();
    if (stream == NULL) {
        THROW(env, "java/lang/InternalError", "Error creating the stream");
        return (jlong)0;
    }
    return (jlong) stream;
}
// Initialize the compression stream
JNIEXPORT void JNICALL Java_org_apache_hadoop_io_compress_zstd_ZStandardCompressor_init (JNIEnv *env, jobject this, jint level, jlong stream) {
    size_t result = dlsym_ZSTD_initCStream((ZSTD_CStream *) stream, level);
    if (dlsym_ZSTD_isError(result)) {
        THROW(env, "java/lang/InternalError", dlsym_ZSTD_getErrorName(result));
        return;
    }
}
// free the compression stream
JNIEXPORT void JNICALL Java_org_apache_hadoop_io_compress_zstd_ZStandardCompressor_end (JNIEnv *env, jobject this, jlong stream) {
    size_t result = dlsym_ZSTD_freeCStream((ZSTD_CStream *) stream);
    if (dlsym_ZSTD_isError(result)) {
        THROW(env, "java/lang/InternalError", dlsym_ZSTD_getErrorName(result));
        return;
    }
}
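// Compress one chunk of the uncompressed direct buffer. The Java-side offset
// and length are mapped onto a ZSTD_inBuffer, and the output buffer onto a
// ZSTD_outBuffer. ZSTD_compressStream consumes as much input as it can;
// ZSTD_endStream writes the frame epilogue once 'finish' is set and all input
// has been consumed, otherwise ZSTD_flushStream flushes what is buffered.
// The consumed position and remaining length are written back to the Java
// fields, and the number of compressed bytes produced is returned.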
JNIEXPORT jint JNICALL Java_org_apache_hadoop_io_compress_zstd_ZStandardCompressor_deflateBytesDirect
(JNIEnv *env, jobject this, jobject uncompressed_direct_buf, jint uncompressed_direct_buf_off, jint uncompressed_direct_buf_len, jobject compressed_direct_buf, jint compressed_direct_buf_len ) {
    ZSTD_CStream* const stream = (ZSTD_CStream*) (*env)->GetLongField(env, this, ZStandardCompressor_stream);
    if (!stream) {
        THROW(env, "java/lang/NullPointerException", NULL);
        return (jint)0;
    }
    jlong bytes_read = (*env)->GetLongField(env, this, ZStandardCompressor_bytesRead);
    jlong bytes_written = (*env)->GetLongField(env, this, ZStandardCompressor_bytesWritten);
    jboolean finish = (*env)->GetBooleanField(env, this, ZStandardCompressor_finish);
    // Get the input direct buffer
    void * uncompressed_bytes = (*env)->GetDirectBufferAddress(env, uncompressed_direct_buf);
    if (!uncompressed_bytes) {
        THROW(env, "java/lang/InternalError", "Undefined memory address for uncompressedDirectBuf");
        return (jint) 0;
    }
    // Get the output direct buffer
    void * compressed_bytes = (*env)->GetDirectBufferAddress(env, compressed_direct_buf);
    if (!compressed_bytes) {
        THROW(env, "java/lang/InternalError", "Undefined memory address for compressedDirectBuf");
        return (jint) 0;
    }
    ZSTD_inBuffer input = { uncompressed_bytes, uncompressed_direct_buf_len, uncompressed_direct_buf_off };
    ZSTD_outBuffer output = { compressed_bytes, compressed_direct_buf_len, 0 };
    size_t size = dlsym_ZSTD_compressStream(stream, &output, &input);
    if (dlsym_ZSTD_isError(size)) {
        THROW(env, "java/lang/InternalError", dlsym_ZSTD_getErrorName(size));
        return (jint) 0;
    }
    if (finish && input.pos == input.size) {
        // end the stream, flush and write the frame epilogue
        size = dlsym_ZSTD_endStream(stream, &output);
        if (!size) {
            (*env)->SetBooleanField(env, this, ZStandardCompressor_finished, JNI_TRUE);
        }
    } else {
        // need to flush the output buffer
        // this also updates the output buffer position.
        size = dlsym_ZSTD_flushStream(stream, &output);
    }
    if (dlsym_ZSTD_isError(size)) {
        THROW(env, "java/lang/InternalError", dlsym_ZSTD_getErrorName(size));
        return (jint) 0;
    }
    bytes_read += input.pos;
    bytes_written += output.pos;
    (*env)->SetLongField(env, this, ZStandardCompressor_bytesRead, bytes_read);
    (*env)->SetLongField(env, this, ZStandardCompressor_bytesWritten, bytes_written);
    (*env)->SetIntField(env, this, ZStandardCompressor_uncompressedDirectBufOff, input.pos);
    (*env)->SetIntField(env, this, ZStandardCompressor_uncompressedDirectBufLen, input.size - input.pos);
    return (jint) output.pos;
}
JNIEXPORT jstring JNICALL Java_org_apache_hadoop_io_compress_zstd_ZStandardCompressor_getLibraryName
(JNIEnv *env, jclass class) {
#ifdef UNIX
    if (dlsym_ZSTD_isError) {
        Dl_info dl_info;
        if (dladdr( dlsym_ZSTD_isError, &dl_info)) {
            return (*env)->NewStringUTF(env, dl_info.dli_fname);
        }
    }
    return (*env)->NewStringUTF(env, HADOOP_ZSTD_LIBRARY);
#endif
#ifdef WINDOWS
    LPWSTR filename = NULL;
    GetLibraryName(dlsym_ZSTD_isError, &filename);
    if (filename != NULL) {
        return (*env)->NewString(env, filename, (jsize) wcslen(filename));
    } else {
        return (*env)->NewStringUTF(env, "Unavailable");
    }
#endif
}
// returns the max size of the recommended input and output buffers
JNIEXPORT jint JNICALL Java_org_apache_hadoop_io_compress_zstd_ZStandardCompressor_getStreamSize
(JNIEnv *env, jobject this) {
    int x = (int) dlsym_ZSTD_CStreamInSize();
    int y = (int) dlsym_ZSTD_CStreamOutSize();
    return (x >= y) ? x : y;
}
#endif // defined HADOOP_ZSTD_LIBRARY
 | 
	apache_hadoop 
 | 
	2017-01-28 
 | 
	312b36d113d83640b92c62fdd91ede74bd04c00f 
 | 
					
	hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationWithProxyUserFilter.java 
 | 119 
							 | 
	/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.security;
import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
import org.apache.hadoop.security.authorize.AuthorizationException;
import org.apache.hadoop.security.authorize.ProxyUsers;
import org.apache.hadoop.util.HttpExceptionUtils;
import org.apache.http.NameValuePair;
import org.apache.http.client.utils.URLEncodedUtils;
import javax.servlet.FilterChain;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletRequestWrapper;
import javax.servlet.http.HttpServletResponse;
import java.io.IOException;
import java.nio.charset.Charset;
import java.util.List;
/**
 * Extends {@link AuthenticationFilter} to support authorizing
 * proxy users. If the query string contains a doAs parameter,
 * the proxy user is authorized; otherwise the request proceeds
 * to the next filter.
 */
public class AuthenticationWithProxyUserFilter extends AuthenticationFilter {
  /**
   * Constant used in URL's query string to perform a proxy user request, the
   * value of the <code>DO_AS</code> parameter is the user the request will be
   * done on behalf of.
   */
  private static final String DO_AS = "doAs";
  private static final Charset UTF8_CHARSET = Charset.forName("UTF-8");
  /**
   * This method provides the ability to perform pre/post tasks
   * in the filter chain. It is overridden here to authorize the
   * proxy user between the AuthenticationFilter and the next filter.
   * @param filterChain the filter chain object.
   * @param request the request object.
   * @param response the response object.
   *
   * @throws IOException
   * @throws ServletException
   */
  @Override
  protected void doFilter(FilterChain filterChain, HttpServletRequest request,
      HttpServletResponse response) throws IOException, ServletException {
    // authorize proxy user before calling next filter.
    String proxyUser = getDoAs(request);
    if (proxyUser != null) {
      UserGroupInformation realUser =
          UserGroupInformation.createRemoteUser(request.getRemoteUser());
      UserGroupInformation proxyUserInfo =
          UserGroupInformation.createProxyUser(proxyUser, realUser);
      try {
        ProxyUsers.authorize(proxyUserInfo, request.getRemoteAddr());
      } catch (AuthorizationException ex) {
        HttpExceptionUtils.createServletExceptionResponse(response,
            HttpServletResponse.SC_FORBIDDEN, ex);
        // stop filter chain if there is an Authorization Exception.
        return;
      }
      final UserGroupInformation finalProxyUser = proxyUserInfo;
      // Change the remote user after proxy user is authorized.
      request = new HttpServletRequestWrapper(request) {
        @Override
        public String getRemoteUser() {
          return finalProxyUser.getUserName();
        }
      };
    }
    filterChain.doFilter(request, response);
  }
  /**
   * Get proxy user from query string.
   * @param request the request object
   * @return proxy user
   */
  public static String getDoAs(HttpServletRequest request) {
    String queryString = request.getQueryString();
    if (queryString == null) {
      return null;
    }
    List<NameValuePair> list = URLEncodedUtils.parse(queryString, UTF8_CHARSET);
    if (list != null) {
      for (NameValuePair nv : list) {
        if (DO_AS.equalsIgnoreCase(nv.getName())) {
          return nv.getValue();
        }
      }
    }
    return null;
  }
}
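/*
 * Illustrative sketch (not part of the original file; the request and user
 * names are hypothetical). getDoAs() inspects only the query string, matching
 * the parameter name case-insensitively:
 *
 *   // e.g. for "GET /jmx?doAs=proxiedUser"
 *   String proxyUser = AuthenticationWithProxyUserFilter.getDoAs(request);
 *   // proxyUser == "proxiedUser"; doFilter() then calls ProxyUsers.authorize()
 *   // for the authenticated remote user before continuing the chain.
 */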
 
 | 
	apache_hadoop 
 | 
	2017-01-28 
 | 
	312b36d113d83640b92c62fdd91ede74bd04c00f 
 | 
					
	hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ReadWriteDiskValidator.java 
 | 95 
							 | 
	/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.util;
import org.apache.hadoop.util.DiskChecker.DiskErrorException;
import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Arrays;
import java.util.Random;
import java.util.concurrent.TimeUnit;
/**
 * ReadWriteDiskValidator checks a directory by creating a file in it,
 * writing some bytes, reading them back, and verifying that they are identical.
 * Read time and write time are recorded and put into an
 * {@link ReadWriteDiskValidatorMetrics}.
 */
public class ReadWriteDiskValidator implements DiskValidator {
  public static final String NAME = "read-write";
  private static final Random RANDOM = new Random();
  @Override
  public void checkStatus(File dir) throws DiskErrorException {
    ReadWriteDiskValidatorMetrics metric =
        ReadWriteDiskValidatorMetrics.getMetric(dir.toString());
    Path tmpFile = null;
    try {
      if (!dir.isDirectory()) {
        metric.diskCheckFailed();
        throw new DiskErrorException(dir + " is not a directory!");
      }
      // check the directory presence and permission.
      DiskChecker.checkDir(dir);
      // create a tmp file under the dir
      tmpFile = Files.createTempFile(dir.toPath(), "test", "tmp");
      // write 16 bytes into the tmp file
      byte[] inputBytes = new byte[16];
      RANDOM.nextBytes(inputBytes);
      long startTime = System.nanoTime();
      Files.write(tmpFile, inputBytes);
      long writeLatency = TimeUnit.MICROSECONDS.convert(
          System.nanoTime() - startTime, TimeUnit.NANOSECONDS);
      metric.addWriteFileLatency(writeLatency);
      // read back
      startTime = System.nanoTime();
      byte[] outputBytes = Files.readAllBytes(tmpFile);
      long readLatency = TimeUnit.MICROSECONDS.convert(
          System.nanoTime() - startTime, TimeUnit.NANOSECONDS);
      metric.addReadFileLatency(readLatency);
      // validation
      if (!Arrays.equals(inputBytes, outputBytes)) {
        metric.diskCheckFailed();
        throw new DiskErrorException("Data in file has been corrupted.");
      }
    } catch (IOException e) {
      metric.diskCheckFailed();
      throw new DiskErrorException("Disk Check failed!", e);
    } finally {
      // delete the file
      if (tmpFile != null) {
        try {
          Files.delete(tmpFile);
        } catch (IOException e) {
          metric.diskCheckFailed();
          throw new DiskErrorException("File deletion failed!", e);
        }
      }
    }
  }
}
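/*
 * Illustrative usage sketch (not part of the original file; the directory path
 * is an assumption). A failed check both updates the per-directory
 * ReadWriteDiskValidatorMetrics and surfaces as a DiskErrorException:
 *
 *   DiskValidator validator = new ReadWriteDiskValidator();
 *   try {
 *     validator.checkStatus(new File("/data/disk1"));   // path assumed
 *   } catch (DiskErrorException e) {
 *     // treat the directory as bad, e.g. remove it from the usable set
 *   }
 */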
 
 | 
	apache_hadoop 
 | 
	2017-01-28 
 | 
	312b36d113d83640b92c62fdd91ede74bd04c00f 
 | 
					
	hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationPriorityACLConfiguration.java 
 | 120 
							 | 
	/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements.  See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership.  The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License.  You may obtain a copy of the License at
*
*     http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
import java.util.List;
import org.apache.hadoop.yarn.api.records.Priority;
import org.junit.Assert;
import org.junit.Test;
public class TestApplicationPriorityACLConfiguration {
  private final int defaultPriorityQueueA = 3;
  private final int defaultPriorityQueueB = -1;
  private final int maxPriorityQueueA = 5;
  private final int maxPriorityQueueB = 10;
  private final int clusterMaxPriority = 10;
  private static final String QUEUE_A_USER = "queueA_user";
  private static final String QUEUE_B_USER = "queueB_user";
  private static final String QUEUE_A_GROUP = "queueA_group";
  private static final String QUEUEA = "queueA";
  private static final String QUEUEB = "queueB";
  private static final String QUEUEC = "queueC";
  @Test
  public void testSimpleACLConfiguration() throws Exception {
    CapacitySchedulerConfiguration csConf = new CapacitySchedulerConfiguration();
    csConf.setQueues(CapacitySchedulerConfiguration.ROOT,
        new String[]{QUEUEA, QUEUEB, QUEUEC});
    csConf.setCapacity(CapacitySchedulerConfiguration.ROOT + "." + QUEUEA, 50f);
    csConf.setCapacity(CapacitySchedulerConfiguration.ROOT + "." + QUEUEB, 25f);
    csConf.setCapacity(CapacitySchedulerConfiguration.ROOT + "." + QUEUEC, 25f);
    // Success case: Configure one user/group level priority acl for queue A.
    String[] aclsForA = new String[2];
    aclsForA[0] = QUEUE_A_USER;
    aclsForA[1] = QUEUE_A_GROUP;
    csConf.setPriorityAcls(CapacitySchedulerConfiguration.ROOT + "." + QUEUEA,
        Priority.newInstance(maxPriorityQueueA),
        Priority.newInstance(defaultPriorityQueueA), aclsForA);
    // Retrieve the stored ACL configs for queue A.
    List<AppPriorityACLGroup> pGroupA = csConf.getPriorityAcls(
        CapacitySchedulerConfiguration.ROOT + "." + QUEUEA,
        Priority.newInstance(clusterMaxPriority));
    // Validate!
    verifyACLs(pGroupA, QUEUE_A_USER, QUEUE_A_GROUP, maxPriorityQueueA,
        defaultPriorityQueueA);
  }
  @Test
  public void testACLConfigurationForInvalidCases() throws Exception {
    CapacitySchedulerConfiguration csConf = new CapacitySchedulerConfiguration();
    csConf.setQueues(CapacitySchedulerConfiguration.ROOT,
        new String[]{QUEUEA, QUEUEB, QUEUEC});
    csConf.setCapacity(CapacitySchedulerConfiguration.ROOT + "." + QUEUEA, 50f);
    csConf.setCapacity(CapacitySchedulerConfiguration.ROOT + "." + QUEUEB, 25f);
    csConf.setCapacity(CapacitySchedulerConfiguration.ROOT + "." + QUEUEC, 25f);
    // Success case: Configure one user/group level priority acl for queue A.
    String[] aclsForA = new String[2];
    aclsForA[0] = QUEUE_A_USER;
    aclsForA[1] = QUEUE_A_GROUP;
    csConf.setPriorityAcls(CapacitySchedulerConfiguration.ROOT + "." + QUEUEA,
        Priority.newInstance(maxPriorityQueueA),
        Priority.newInstance(defaultPriorityQueueA), aclsForA);
    String[] aclsForB = new String[1];
    aclsForB[0] = QUEUE_B_USER;
    csConf.setPriorityAcls(CapacitySchedulerConfiguration.ROOT + "." + QUEUEB,
        Priority.newInstance(maxPriorityQueueB),
        Priority.newInstance(defaultPriorityQueueB), aclsForB);
    // Try to get the ACL configs and make sure there are errors/exceptions
    List<AppPriorityACLGroup> pGroupA = csConf.getPriorityAcls(
        CapacitySchedulerConfiguration.ROOT + "." + QUEUEA,
        Priority.newInstance(clusterMaxPriority));
    List<AppPriorityACLGroup> pGroupB = csConf.getPriorityAcls(
        CapacitySchedulerConfiguration.ROOT + "." + QUEUEB,
        Priority.newInstance(clusterMaxPriority));
    // Validate stored ACL values with configured ones.
    verifyACLs(pGroupA, QUEUE_A_USER, QUEUE_A_GROUP, maxPriorityQueueA,
        defaultPriorityQueueA);
    verifyACLs(pGroupB, QUEUE_B_USER, "", maxPriorityQueueB, 0);
  }
  private void verifyACLs(List<AppPriorityACLGroup> pGroup, String queueUser,
      String queueGroup, int maxPriority, int defaultPriority) {
    AppPriorityACLGroup group = pGroup.get(0);
    String aclString = queueUser + " " + queueGroup;
    Assert.assertEquals(aclString.trim(),
        group.getACLList().getAclString().trim());
    Assert.assertEquals(maxPriority, group.getMaxPriority().getPriority());
    Assert.assertEquals(defaultPriority,
        group.getDefaultPriority().getPriority());
  }
}
 
 | 
	apache_hadoop 
 | 
	2017-01-28 
 | 
	312b36d113d83640b92c62fdd91ede74bd04c00f 
 | 
					
	hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServerWithSpengo.java 
 | 239 
							 | 
	/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.http;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.minikdc.MiniKdc;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.AuthenticationFilterInitializer;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.KerberosTestUtils;
import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
import org.apache.hadoop.security.authentication.server.AuthenticationToken;
import org.apache.hadoop.security.authentication.util.Signer;
import org.apache.hadoop.security.authentication.util.SignerSecretProvider;
import org.apache.hadoop.security.authentication.util.StringSignerSecretProviderCreator;
import org.apache.hadoop.security.authorize.AccessControlList;
import org.apache.hadoop.security.authorize.ProxyUsers;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
import org.junit.Assert;
import java.io.File;
import java.io.FileWriter;
import java.io.Writer;
import java.net.HttpURLConnection;
import java.net.URI;
import java.net.URL;
import java.util.Properties;
import static org.junit.Assert.assertTrue;
/**
 * Tests the HTTP server with SPNEGO authentication.
 */
public class TestHttpServerWithSpengo {
  static final Log LOG = LogFactory.getLog(TestHttpServerWithSpengo.class);
  private static final String SECRET_STR = "secret";
  private static final String HTTP_USER = "HTTP";
  private static final String PREFIX = "hadoop.http.authentication.";
  private static final long TIMEOUT = 20000;
  private static File httpSpnegoKeytabFile = new File(
      KerberosTestUtils.getKeytabFile());
  private static String httpSpnegoPrincipal =
      KerberosTestUtils.getServerPrincipal();
  private static String realm = KerberosTestUtils.getRealm();
  private static File testRootDir = new File("target",
      TestHttpServerWithSpengo.class.getName() + "-root");
  private static MiniKdc testMiniKDC;
  private static File secretFile = new File(testRootDir, SECRET_STR);
  @BeforeClass
  public static void setUp() throws Exception {
    try {
      testMiniKDC = new MiniKdc(MiniKdc.createConf(), testRootDir);
      testMiniKDC.start();
      testMiniKDC.createPrincipal(
          httpSpnegoKeytabFile, HTTP_USER + "/localhost");
    } catch (Exception e) {
      assertTrue("Couldn't setup MiniKDC", false);
    }
    Writer w = new FileWriter(secretFile);
    w.write("secret");
    w.close();
  }
  @AfterClass
  public static void tearDown() {
    if (testMiniKDC != null) {
      testMiniKDC.stop();
    }
  }
  /**
   * groupA
   *  - userA
   * groupB
   *  - userA, userB
   * groupC
   *  - userC
   * SPNEGO filter has been enabled.
   * userA has the privilege to impersonate users in groupB.
   * userA has admin access to all default servlets, but userB
   * and userC do not. So "/logs" can only be accessed by userA.
   * @throws Exception
   */
  @Test
  public void testAuthenticationWithProxyUser() throws Exception {
    Configuration spengoConf = getSpengoConf(new Configuration());
    //setup logs dir
    System.setProperty("hadoop.log.dir", testRootDir.getAbsolutePath());
    // Setup user group
    UserGroupInformation.createUserForTesting("userA",
        new String[]{"groupA", "groupB"});
    UserGroupInformation.createUserForTesting("userB",
        new String[]{"groupB"});
    UserGroupInformation.createUserForTesting("userC",
        new String[]{"groupC"});
    // Make userA impersonate users in groupB
    spengoConf.set("hadoop.proxyuser.userA.hosts", "*");
    spengoConf.set("hadoop.proxyuser.userA.groups", "groupB");
    ProxyUsers.refreshSuperUserGroupsConfiguration(spengoConf);
    HttpServer2 httpServer = null;
    try {
      // Create http server to test.
      httpServer = getCommonBuilder()
          .setConf(spengoConf)
          .setACL(new AccessControlList("userA groupA"))
          .build();
      httpServer.start();
      // Get signer to encrypt token
      Signer signer = getSignerToEncrypt();
      // setup auth token for userA
      AuthenticatedURL.Token token = getEncryptedAuthToken(signer, "userA");
      String serverURL = "http://" +
          NetUtils.getHostPortString(httpServer.getConnectorAddress(0)) + "/";
      // The default authenticator is kerberos.
      AuthenticatedURL authUrl = new AuthenticatedURL();
      // userA impersonates userB, it's allowed.
      for (String servlet :
          new String[]{"stacks", "jmx", "conf"}) {
        HttpURLConnection conn = authUrl
            .openConnection(new URL(serverURL + servlet + "?doAs=userB"),
                token);
        Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
      }
      // userA cannot impersonate userC, it fails.
      for (String servlet :
          new String[]{"stacks", "jmx", "conf"}){
        HttpURLConnection conn = authUrl
            .openConnection(new URL(serverURL + servlet + "?doAs=userC"),
                token);
        Assert.assertEquals(HttpURLConnection.HTTP_FORBIDDEN,
            conn.getResponseCode());
      }
      // "/logs" and "/logLevel" require admin authorization,
      // only userA has the access.
      for (String servlet :
          new String[]{"logLevel", "logs"}) {
        HttpURLConnection conn = authUrl
            .openConnection(new URL(serverURL + servlet), token);
        Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode());
      }
      // Setup token for userB
      token = getEncryptedAuthToken(signer, "userB");
      // userB cannot access these servlets.
      for (String servlet :
          new String[]{"logLevel", "logs"}) {
        HttpURLConnection conn = authUrl
            .openConnection(new URL(serverURL + servlet), token);
        Assert.assertEquals(HttpURLConnection.HTTP_FORBIDDEN,
            conn.getResponseCode());
      }
    } finally {
      if (httpServer != null) {
        httpServer.stop();
      }
    }
  }
  private AuthenticatedURL.Token getEncryptedAuthToken(Signer signer,
      String user) throws Exception {
    AuthenticationToken token =
        new AuthenticationToken(user, user, "kerberos");
    token.setExpires(System.currentTimeMillis() + TIMEOUT);
    return new AuthenticatedURL.Token(signer.sign(token.toString()));
  }
  private Signer getSignerToEncrypt() throws Exception {
    SignerSecretProvider secretProvider =
        StringSignerSecretProviderCreator.newStringSignerSecretProvider();
    Properties secretProviderProps = new Properties();
    secretProviderProps.setProperty(
        AuthenticationFilter.SIGNATURE_SECRET, SECRET_STR);
    secretProvider.init(secretProviderProps, null, TIMEOUT);
    return new Signer(secretProvider);
  }
  private Configuration getSpengoConf(Configuration conf) {
    conf = new Configuration();
    conf.set(HttpServer2.FILTER_INITIALIZER_PROPERTY,
        AuthenticationFilterInitializer.class.getName());
    conf.set(PREFIX + "type", "kerberos");
    conf.setBoolean(PREFIX + "simple.anonymous.allowed", false);
    conf.set(PREFIX + "signature.secret.file",
        secretFile.getAbsolutePath());
    conf.set(PREFIX + "kerberos.keytab",
        httpSpnegoKeytabFile.getAbsolutePath());
    conf.set(PREFIX + "kerberos.principal", httpSpnegoPrincipal);
    conf.set(PREFIX + "cookie.domain", realm);
    conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION,
        true);
    return conf;
  }
  private HttpServer2.Builder getCommonBuilder() throws Exception {
    return new HttpServer2.Builder().setName("test")
        .addEndpoint(new URI("http://localhost:0"))
        .setFindPort(true);
  }
}
 
 | 
	apache_hadoop 
 | 
	2017-01-28 
 | 
	312b36d113d83640b92c62fdd91ede74bd04c00f 
 | 
					
	hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/metrics/TestDataNodeOutlierDetectionViaMetrics.java 
 | 142 
							 | 
	/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.server.datanode.metrics;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.log4j.Level;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.Timeout;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.Map;
import java.util.Random;
import static org.hamcrest.core.Is.is;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue;
/**
 * Test that the {@link DataNodePeerMetrics} class is able to detect
 * outliers, i.e. slow nodes, via the metrics it maintains.
 */
public class TestDataNodeOutlierDetectionViaMetrics {
  public static final Logger LOG =
      LoggerFactory.getLogger(TestDataNodeOutlierDetectionViaMetrics.class);
  /**
   * Set a timeout for every test case.
   */
  @Rule
  public Timeout testTimeout = new Timeout(300_000);
  // A few constants to keep the test run time short.
  private static final int WINDOW_INTERVAL_SECONDS = 3;
  private static final int ROLLING_AVERAGE_WINDOWS = 10;
  private static final int SLOW_NODE_LATENCY_MS = 20_000;
  private static final int FAST_NODE_MAX_LATENCY_MS = 5;
  private Random random = new Random(System.currentTimeMillis());
  @Before
  public void setup() {
    GenericTestUtils.setLogLevel(DataNodePeerMetrics.LOG, Level.ALL);
    GenericTestUtils.setLogLevel(SlowNodeDetector.LOG, Level.ALL);
  }
  /**
   * Test that a very slow peer is detected as an outlier.
   */
  @Test
  public void testOutlierIsDetected() throws Exception {
    final String slowNodeName = "SlowNode";
    DataNodePeerMetrics peerMetrics = new DataNodePeerMetrics(
        "PeerMetrics-For-Test", WINDOW_INTERVAL_SECONDS,
        ROLLING_AVERAGE_WINDOWS);
    injectFastNodesSamples(peerMetrics);
    injectSlowNodeSamples(peerMetrics, slowNodeName);
    // Trigger a snapshot.
    peerMetrics.dumpSendPacketDownstreamAvgInfoAsJson();
    final Map<String, Double> outliers = peerMetrics.getOutliers();
    LOG.info("Got back outlier nodes: {}", outliers);
    assertThat(outliers.size(), is(1));
    assertTrue(outliers.containsKey(slowNodeName));
  }
  /**
   * Test that when there are no outliers, we get back nothing.
   */
  @Test
  public void testWithNoOutliers() throws Exception {
    DataNodePeerMetrics peerMetrics = new DataNodePeerMetrics(
        "PeerMetrics-For-Test", WINDOW_INTERVAL_SECONDS,
        ROLLING_AVERAGE_WINDOWS);
    injectFastNodesSamples(peerMetrics);
    // Trigger a snapshot.
    peerMetrics.dumpSendPacketDownstreamAvgInfoAsJson();
    // Ensure that no outliers are reported.
    assertTrue(peerMetrics.getOutliers().isEmpty());
  }
  /**
   * Inject fake stats for MIN_OUTLIER_DETECTION_PEERS fast nodes.
   *
   * @param peerMetrics the peer metrics instance to populate
   */
  public void injectFastNodesSamples(DataNodePeerMetrics peerMetrics) {
    for (int nodeIndex = 0;
         nodeIndex < SlowNodeDetector.getMinOutlierDetectionPeers();
         ++nodeIndex) {
      final String nodeName = "FastNode-" + nodeIndex;
      LOG.info("Generating stats for node {}", nodeName);
      for (int i = 0;
           i < 2 * DataNodePeerMetrics.MIN_OUTLIER_DETECTION_SAMPLES;
           ++i) {
        peerMetrics.addSendPacketDownstream(
            nodeName, random.nextInt(FAST_NODE_MAX_LATENCY_MS));
      }
    }
  }
  /**
   * Inject fake stats for one extremely slow node.
   */
  public void injectSlowNodeSamples(
      DataNodePeerMetrics peerMetrics, String slowNodeName)
      throws InterruptedException {
    // And the one slow node.
    for (int i = 0;
         i < 2 * DataNodePeerMetrics.MIN_OUTLIER_DETECTION_SAMPLES;
         ++i) {
      peerMetrics.addSendPacketDownstream(
          slowNodeName, SLOW_NODE_LATENCY_MS);
    }
  }
}
 
 | 
	apache_hadoop 
 | 
	2017-01-28 
 | 
	312b36d113d83640b92c62fdd91ede74bd04c00f 
 | 
					
	hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebServer.java 
 | 152 
							 | 
	/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.crypto.key.kms.server;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.MalformedURLException;
import java.net.URI;
import java.net.URL;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.ConfigurationWithLogging;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.http.HttpServer2;
import org.apache.hadoop.security.ssl.SSLFactory;
import org.apache.hadoop.util.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * The KMS web server.
 */
@InterfaceAudience.Private
public class KMSWebServer {
  private static final Logger LOG =
      LoggerFactory.getLogger(KMSWebServer.class);
  private static final String NAME = "kms";
  private static final String SERVLET_PATH = "/kms";
  private final HttpServer2 httpServer;
  private final String scheme;
  KMSWebServer(Configuration conf, Configuration sslConf) throws Exception {
    // Override configuration with deprecated environment variables.
    deprecateEnv("KMS_TEMP", conf, HttpServer2.HTTP_TEMP_DIR_KEY,
        KMSConfiguration.KMS_SITE_XML);
    deprecateEnv("KMS_HTTP_PORT", conf,
        KMSConfiguration.HTTP_PORT_KEY, KMSConfiguration.KMS_SITE_XML);
    deprecateEnv("KMS_MAX_THREADS", conf,
        HttpServer2.HTTP_MAX_THREADS_KEY, KMSConfiguration.KMS_SITE_XML);
    deprecateEnv("KMS_MAX_HTTP_HEADER_SIZE", conf,
        HttpServer2.HTTP_MAX_REQUEST_HEADER_SIZE_KEY,
        KMSConfiguration.KMS_SITE_XML);
    deprecateEnv("KMS_MAX_HTTP_HEADER_SIZE", conf,
        HttpServer2.HTTP_MAX_RESPONSE_HEADER_SIZE_KEY,
        KMSConfiguration.KMS_SITE_XML);
    deprecateEnv("KMS_SSL_ENABLED", conf,
        KMSConfiguration.SSL_ENABLED_KEY, KMSConfiguration.KMS_SITE_XML);
    deprecateEnv("KMS_SSL_KEYSTORE_FILE", sslConf,
        SSLFactory.SSL_SERVER_KEYSTORE_LOCATION,
        SSLFactory.SSL_SERVER_CONF_DEFAULT);
    deprecateEnv("KMS_SSL_KEYSTORE_PASS", sslConf,
        SSLFactory.SSL_SERVER_KEYSTORE_PASSWORD,
        SSLFactory.SSL_SERVER_CONF_DEFAULT);
    boolean sslEnabled = conf.getBoolean(KMSConfiguration.SSL_ENABLED_KEY,
        KMSConfiguration.SSL_ENABLED_DEFAULT);
    scheme = sslEnabled ? HttpServer2.HTTPS_SCHEME : HttpServer2.HTTP_SCHEME;
    String host = conf.get(KMSConfiguration.HTTP_HOST_KEY,
        KMSConfiguration.HTTP_HOST_DEFAULT);
    int port = conf.getInt(KMSConfiguration.HTTP_PORT_KEY,
        KMSConfiguration.HTTP_PORT_DEFAULT);
    URI endpoint = new URI(scheme, null, host, port, null, null, null);
    httpServer = new HttpServer2.Builder()
        .setName(NAME)
        .setConf(conf)
        .setSSLConf(sslConf)
        .authFilterConfigurationPrefix(KMSAuthenticationFilter.CONFIG_PREFIX)
        .addEndpoint(endpoint)
        .build();
  }
  /**
   * Load the deprecated environment variable into the configuration.
   *
   * @param varName the environment variable name
   * @param conf the configuration
   * @param propName the configuration property name
   * @param confFile the configuration file name
   */
  private static void deprecateEnv(String varName, Configuration conf,
                                   String propName, String confFile) {
    String value = System.getenv(varName);
    if (value == null) {
      return;
    }
    String propValue = conf.get(propName);
    LOG.warn("Environment variable {} = '{}' is deprecated and overriding"
        + " property {} = '{}', please set the property in {} instead.",
        varName, value, propName, propValue, confFile);
    conf.set(propName, value, "environment variable " + varName);
  }
  public void start() throws IOException {
    httpServer.start();
  }
  public boolean isRunning() {
    return httpServer.isAlive();
  }
  public void join() throws InterruptedException {
    httpServer.join();
  }
  public void stop() throws Exception {
    httpServer.stop();
  }
  public URL getKMSUrl() {
    InetSocketAddress addr = httpServer.getConnectorAddress(0);
    if (null == addr) {
      return null;
    }
    try {
      return new URL(scheme, addr.getHostName(), addr.getPort(),
          SERVLET_PATH);
    } catch (MalformedURLException ex) {
      throw new RuntimeException("It should never happen: " + ex.getMessage(),
          ex);
    }
  }
  public static void main(String[] args) throws Exception {
    StringUtils.startupShutdownMessage(KMSWebServer.class, args, LOG);
    Configuration conf = new ConfigurationWithLogging(
        KMSConfiguration.getKMSConf());
    Configuration sslConf = new ConfigurationWithLogging(
        SSLFactory.readSSLConfiguration(conf, SSLFactory.Mode.SERVER));
    KMSWebServer kmsWebServer = new KMSWebServer(conf, sslConf);
    kmsWebServer.start();
    kmsWebServer.join();
  }
}
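/*
 * Illustrative embedding sketch (not part of the original file). main() above
 * covers the standalone case; the same lifecycle used programmatically (from
 * within this package, since the constructor is package-private) would be:
 *
 *   Configuration conf = KMSConfiguration.getKMSConf();
 *   Configuration sslConf =
 *       SSLFactory.readSSLConfiguration(conf, SSLFactory.Mode.SERVER);
 *   KMSWebServer server = new KMSWebServer(conf, sslConf);
 *   server.start();
 *   URL url = server.getKMSUrl();   // e.g. http://<host>:<port>/kms
 *   // ... serve requests ...
 *   server.stop();
 */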
 
 | 
	apache_hadoop 
 | 
	2017-01-28 
 | 
	312b36d113d83640b92c62fdd91ede74bd04c00f 
 | 
					
	hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/AppPriorityACLsManager.java 
 | 230 
							 | 
	/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements.  See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership.  The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License.  You may obtain a copy of the License at
*
*     http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.yarn.server.resourcemanager.security;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AccessControlList;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.AppPriorityACLGroup;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
/**
 * Manager class to store and check permissions for priority ACLs.
 */
public class AppPriorityACLsManager {
  private static final Log LOG = LogFactory
      .getLog(AppPriorityACLsManager.class);
  /*
   * An internal class to store ACLs specific to each priority. This will be
   * used to read and process acl's during app submission time as well.
   */
  private static class PriorityACL {
    private Priority priority;
    private Priority defaultPriority;
    private AccessControlList acl;
    PriorityACL(Priority priority, Priority defaultPriority,
        AccessControlList acl) {
      this.setPriority(priority);
      this.setDefaultPriority(defaultPriority);
      this.setAcl(acl);
    }
    public Priority getPriority() {
      return priority;
    }
    public void setPriority(Priority maxPriority) {
      this.priority = maxPriority;
    }
    public Priority getDefaultPriority() {
      return defaultPriority;
    }
    public void setDefaultPriority(Priority defaultPriority) {
      this.defaultPriority = defaultPriority;
    }
    public AccessControlList getAcl() {
      return acl;
    }
    public void setAcl(AccessControlList acl) {
      this.acl = acl;
    }
  }
  private boolean isACLsEnable;
  private final ConcurrentMap<String, List<PriorityACL>> allAcls =
      new ConcurrentHashMap<>();
  public AppPriorityACLsManager(Configuration conf) {
    this.isACLsEnable = conf.getBoolean(YarnConfiguration.YARN_ACL_ENABLE,
        YarnConfiguration.DEFAULT_YARN_ACL_ENABLE);
  }
  /**
   * Clear priority acl during refresh.
   *
   * @param queueName
   *          Queue Name
   */
  public void clearPriorityACLs(String queueName) {
    allAcls.remove(queueName);
  }
  /**
   * Each queue can be configured with different priority ACL groups. This
   * method stores each such ACL list against its queue.
   *
   * @param priorityACLGroups
   *          List of Priority ACL Groups.
   * @param queueName
   *          Queue Name associate with priority acl groups.
   */
  public void addPrioirityACLs(List<AppPriorityACLGroup> priorityACLGroups,
      String queueName) {
    List<PriorityACL> priorityACL = allAcls.get(queueName);
    if (null == priorityACL) {
      priorityACL = new ArrayList<PriorityACL>();
      allAcls.put(queueName, priorityACL);
    }
    // Ensure lowest priority PriorityACLGroup comes first in the list.
    Collections.sort(priorityACLGroups);
    for (AppPriorityACLGroup priorityACLGroup : priorityACLGroups) {
      priorityACL.add(new PriorityACL(priorityACLGroup.getMaxPriority(),
          priorityACLGroup.getDefaultPriority(),
          priorityACLGroup.getACLList()));
      if (LOG.isDebugEnabled()) {
        LOG.debug("Priority ACL group added: max-priority - "
            + priorityACLGroup.getMaxPriority() + ", default-priority - "
            + priorityACLGroup.getDefaultPriority());
      }
    }
  }
  /**
   * Priority-based checkAccess to ensure that the given user has enough
   * permission to submit an application at the given priority level.
   *
   * @param callerUGI
   *          User who submits the application.
   * @param queueName
   *          Queue to which application is submitted.
   * @param submittedPriority
   *          priority of the application.
   * @return True or False to indicate whether application can be submitted at
   *         submitted priority level or not.
   */
  public boolean checkAccess(UserGroupInformation callerUGI, String queueName,
      Priority submittedPriority) {
    if (!isACLsEnable) {
      return true;
    }
    List<PriorityACL> acls = allAcls.get(queueName);
    if (acls == null || acls.isEmpty()) {
      return true;
    }
    PriorityACL approvedPriorityACL = getMappedPriorityAclForUGI(acls,
        callerUGI, submittedPriority);
    if (approvedPriorityACL == null) {
      return false;
    }
    return true;
  }
  /**
   * If an application is submitted without a priority, and the submitting user
   * has a default priority configured, this method returns that default priority
   * to be used as the application's priority.
   *
   * @param queueName
   *          Submitted queue
   * @param user
   *          User who submitted this application
   * @return Default priority associated with given user.
   */
  public Priority getDefaultPriority(String queueName,
      UserGroupInformation user) {
    if (!isACLsEnable) {
      return null;
    }
    List<PriorityACL> acls = allAcls.get(queueName);
    if (acls == null || acls.isEmpty()) {
      return null;
    }
    PriorityACL approvedPriorityACL = getMappedPriorityAclForUGI(acls, user,
        null);
    if (approvedPriorityACL == null) {
      return null;
    }
    Priority defaultPriority = Priority
        .newInstance(approvedPriorityACL.getDefaultPriority().getPriority());
    return defaultPriority;
  }
  private PriorityACL getMappedPriorityAclForUGI(List<PriorityACL> acls ,
      UserGroupInformation user, Priority submittedPriority) {
    // Iterate through all configured ACLs starting from lower priority.
    // If the user matches a configured ACL, remember that entry and keep
    // iterating so that a later (higher-priority) matching entry can replace it.
    PriorityACL selectedAcl = null;
    for (PriorityACL entry : acls) {
      AccessControlList list = entry.getAcl();
      if (list.isUserAllowed(user)) {
        selectedAcl = entry;
        // If submittedPriority is passed through the argument, also check
        // whether submittedPriority is under max-priority of each ACL group.
        if (submittedPriority != null) {
          selectedAcl = null;
          if (submittedPriority.getPriority() <= entry.getPriority()
              .getPriority()) {
            return entry;
          }
        }
      }
    }
    return selectedAcl;
  }
}
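/*
 * Illustrative usage sketch (not part of the original file; the queue name,
 * priorities, 'conf', 'priorityACLGroups' and 'ugi' are assumptions). A caller
 * would typically consult the manager at submission time like this:
 *
 *   AppPriorityACLsManager aclManager = new AppPriorityACLsManager(conf);
 *   aclManager.addPrioirityACLs(priorityACLGroups, "root.queueA");
 *   if (aclManager.checkAccess(ugi, "root.queueA", Priority.newInstance(4))) {
 *     // submission may proceed at priority 4
 *   } else {
 *     // reject, or fall back to
 *     // aclManager.getDefaultPriority("root.queueA", ugi)
 *   }
 */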
 
 | 
	apache_hadoop 
 | 
	2017-01-28 
 | 
	312b36d113d83640b92c62fdd91ede74bd04c00f 
 | 
					
	hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ProfilingFileIoEvents.java 
 | 80 
							 | 
	/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.server.datanode;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.DataNodeVolumeMetrics;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
import org.apache.hadoop.util.Time;
import javax.annotation.Nullable;
import java.util.concurrent.ThreadLocalRandom;
/**
 * Profiles the performance of the metadata and data related operations on
 * datanode volumes.
 */
@InterfaceAudience.Private
class ProfilingFileIoEvents {
  static final Log LOG = LogFactory.getLog(ProfilingFileIoEvents.class);
  private final boolean isEnabled;
  private final int sampleRangeMax;
  public ProfilingFileIoEvents(@Nullable Configuration conf) {
    if (conf != null) {
      isEnabled = conf.getBoolean(DFSConfigKeys
          .DFS_DATANODE_ENABLE_FILEIO_PROFILING_KEY, DFSConfigKeys
          .DFS_DATANODE_ENABLE_FILEIO_PROFILING_DEFAULT);
      double fileIOSamplingFraction = conf.getDouble(DFSConfigKeys
              .DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_KEY,
          DFSConfigKeys
              .DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_DEAFULT);
      if (fileIOSamplingFraction > 1) {
        LOG.warn(DFSConfigKeys
            .DFS_DATANODE_FILEIO_PROFILING_SAMPLING_FRACTION_KEY +
            " value cannot be more than 1. Setting value to 1");
        fileIOSamplingFraction = 1;
      }
      sampleRangeMax = (int) (fileIOSamplingFraction * Integer.MAX_VALUE);
    } else {
      isEnabled = false;
      sampleRangeMax = 0;
    }
  }
  public long beforeMetadataOp(@Nullable FsVolumeSpi volume,
      FileIoProvider.OPERATION op) {
    if (isEnabled) {
      DataNodeVolumeMetrics metrics = getVolumeMetrics(volume);
      if (metrics != null) {
        return Time.monotonicNow();
      }
    }
    return 0;
  }
  public void afterMetadataOp(@Nullable FsVolumeSpi volume,
      FileIoProvider.OPERATION op, long begin) {
    if (isEnabled) {
      DataNodeVolumeMetrics metrics = getVolumeMetrics(volume);
      if (metrics != null) {
        metrics.addMetadastaOperationLatency(Time.monotonicNow() - begin);
      }
    }
  }
  public long beforeFileIo(@Nullable FsVolumeSpi volume,
      FileIoProvider.OPERATION op, long len) {
    if (isEnabled && ThreadLocalRandom.current().nextInt() < sampleRangeMax) {
      DataNodeVolumeMetrics metrics = getVolumeMetrics(volume);
      if (metrics != null) {
        return Time.monotonicNow();
      }
    }
    return 0;
  }
  public void afterFileIo(@Nullable FsVolumeSpi volume,
      FileIoProvider.OPERATION op, long begin, long len) {
    if (isEnabled && begin != 0) {
      DataNodeVolumeMetrics metrics = getVolumeMetrics(volume);
      if (metrics != null) {
        long latency = Time.monotonicNow() - begin;
        metrics.addDataFileIoLatency(latency);
        switch (op) {
        case SYNC:
          metrics.addSyncIoLatency(latency);
          break;
        case FLUSH:
          metrics.addFlushIoLatency(latency);
          break;
        case READ:
          metrics.addReadIoLatency(latency);
          break;
        case WRITE:
          metrics.addWriteIoLatency(latency);
          break;
        default:
        }
      }
    }
  }
  public void onFailure(@Nullable FsVolumeSpi volume, long begin) {
    if (isEnabled) {
      DataNodeVolumeMetrics metrics = getVolumeMetrics(volume);
      if (metrics != null) {
        metrics.addFileIoError(Time.monotonicNow() - begin);
      }
    }
  }
  private DataNodeVolumeMetrics getVolumeMetrics(final FsVolumeSpi volume) {
    if (isEnabled) {
      if (volume != null) {
        return volume.getMetrics();
      }
    }
    return null;
  }
}
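/*
 * Illustrative call pattern (not part of the original file; 'conf', 'volume',
 * 'out' and 'data' are assumptions). This sketches how a caller such as
 * FileIoProvider might wrap a single write:
 *
 *   ProfilingFileIoEvents profiling = new ProfilingFileIoEvents(conf);
 *   long begin = profiling.beforeFileIo(volume,
 *       FileIoProvider.OPERATION.WRITE, data.length);
 *   try {
 *     out.write(data);                            // the actual file I/O
 *     profiling.afterFileIo(volume, FileIoProvider.OPERATION.WRITE,
 *         begin, data.length);
 *   } catch (IOException e) {
 *     profiling.onFailure(volume, begin);
 *     throw e;
 *   }
 */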
 
 | 
	apache_hadoop 
 | 
	2017-01-28 
 | 
	312b36d113d83640b92c62fdd91ede74bd04c00f 
 | 
					
	hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/ZStandardCodec.java 
 | 242 
							 | 
	/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.io.compress;
import org.apache.hadoop.conf.Configurable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.io.compress.zstd.ZStandardCompressor;
import org.apache.hadoop.io.compress.zstd.ZStandardDecompressor;
import org.apache.hadoop.util.NativeCodeLoader;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import static org.apache.hadoop.fs.CommonConfigurationKeys.IO_COMPRESSION_CODEC_ZSTD_BUFFER_SIZE_DEFAULT;
import static org.apache.hadoop.fs.CommonConfigurationKeys.IO_COMPRESSION_CODEC_ZSTD_BUFFER_SIZE_KEY;
/**
 * This class creates zstd compressors/decompressors.
 */
public class ZStandardCodec implements
    Configurable, CompressionCodec, DirectDecompressionCodec  {
  private Configuration conf;
  /**
   * Set the configuration to be used by this object.
   *
   * @param conf the configuration object.
   */
  @Override
  public void setConf(Configuration conf) {
    this.conf = conf;
  }
  /**
   * Return the configuration used by this object.
   *
   * @return the configuration object used by this object.
   */
  @Override
  public Configuration getConf() {
    return conf;
  }
  public static void checkNativeCodeLoaded() {
    if (!NativeCodeLoader.isNativeCodeLoaded() ||
        !NativeCodeLoader.buildSupportsZstd()) {
      throw new RuntimeException("native zStandard library "
          + "not available: this version of libhadoop was built "
          + "without zstd support.");
    }
    if (!ZStandardCompressor.isNativeCodeLoaded()) {
      throw new RuntimeException("native zStandard library not "
          + "available: ZStandardCompressor has not been loaded.");
    }
    if (!ZStandardDecompressor.isNativeCodeLoaded()) {
      throw new RuntimeException("native zStandard library not "
          + "available: ZStandardDecompressor has not been loaded.");
    }
  }
  public static boolean isNativeCodeLoaded() {
    return ZStandardCompressor.isNativeCodeLoaded()
        && ZStandardDecompressor.isNativeCodeLoaded();
  }
  public static String getLibraryName() {
    return ZStandardCompressor.getLibraryName();
  }
  public static int getCompressionLevel(Configuration conf) {
    return conf.getInt(
        CommonConfigurationKeys.IO_COMPRESSION_CODEC_ZSTD_LEVEL_KEY,
        CommonConfigurationKeys.IO_COMPRESSION_CODEC_ZSTD_LEVEL_DEFAULT);
  }
  public static int getCompressionBufferSize(Configuration conf) {
    int bufferSize = getBufferSize(conf);
    return bufferSize == 0 ?
        ZStandardCompressor.getRecommendedBufferSize() :
        bufferSize;
  }
  public static int getDecompressionBufferSize(Configuration conf) {
    int bufferSize = getBufferSize(conf);
    return bufferSize == 0 ?
        ZStandardDecompressor.getRecommendedBufferSize() :
        bufferSize;
  }
  private static int getBufferSize(Configuration conf) {
    return conf.getInt(IO_COMPRESSION_CODEC_ZSTD_BUFFER_SIZE_KEY,
        IO_COMPRESSION_CODEC_ZSTD_BUFFER_SIZE_DEFAULT);
  }
  /**
   * Create a {@link CompressionOutputStream} that will write to the given
   * {@link OutputStream}.
   *
   * @param out the location for the final output stream
   * @return a stream to which the user can write uncompressed data to have it compressed
   * @throws IOException
   */
  @Override
  public CompressionOutputStream createOutputStream(OutputStream out)
      throws IOException {
    return Util.
        createOutputStreamWithCodecPool(this, conf, out);
  }
  /**
   * Create a {@link CompressionOutputStream} that will write to the given
   * {@link OutputStream} with the given {@link Compressor}.
   *
   * @param out        the location for the final output stream
   * @param compressor compressor to use
   * @return a stream to which the user can write uncompressed data to have it compressed
   * @throws IOException
   */
  @Override
  public CompressionOutputStream createOutputStream(OutputStream out,
      Compressor compressor)
      throws IOException {
    checkNativeCodeLoaded();
    return new CompressorStream(out, compressor,
        getCompressionBufferSize(conf));
  }
  /**
   * Get the type of {@link Compressor} needed by this {@link CompressionCodec}.
   *
   * @return the type of compressor needed by this codec.
   */
  @Override
  public Class<? extends Compressor> getCompressorType() {
    checkNativeCodeLoaded();
    return ZStandardCompressor.class;
  }
  /**
   * Create a new {@link Compressor} for use by this {@link CompressionCodec}.
   *
   * @return a new compressor for use by this codec
   */
  @Override
  public Compressor createCompressor() {
    checkNativeCodeLoaded();
    return new ZStandardCompressor(
        getCompressionLevel(conf), getCompressionBufferSize(conf));
  }
  /**
   * Create a {@link CompressionInputStream} that will read from the given
   * input stream.
   *
   * @param in the stream to read compressed bytes from
   * @return a stream to read uncompressed bytes from
   * @throws IOException
   */
  @Override
  public CompressionInputStream createInputStream(InputStream in)
      throws IOException {
    return Util.
        createInputStreamWithCodecPool(this, conf, in);
  }
  /**
   * Create a {@link CompressionInputStream} that will read from the given
   * {@link InputStream} with the given {@link Decompressor}.
   *
   * @param in           the stream to read compressed bytes from
   * @param decompressor decompressor to use
   * @return a stream to read uncompressed bytes from
   * @throws IOException
   */
  @Override
  public CompressionInputStream createInputStream(InputStream in,
                                                  Decompressor decompressor)
      throws IOException {
    checkNativeCodeLoaded();
    return new DecompressorStream(in, decompressor,
        getDecompressionBufferSize(conf));
  }
  /**
   * Get the type of {@link Decompressor} needed by
   * this {@link CompressionCodec}.
   *
   * @return the type of decompressor needed by this codec.
   */
  @Override
  public Class<? extends Decompressor> getDecompressorType() {
    checkNativeCodeLoaded();
    return ZStandardDecompressor.class;
  }
  /**
   * Create a new {@link Decompressor} for use by this {@link CompressionCodec}.
   *
   * @return a new decompressor for use by this codec
   */
  @Override
  public Decompressor createDecompressor() {
    checkNativeCodeLoaded();
    return new ZStandardDecompressor(getDecompressionBufferSize(conf));
  }
  /**
   * Get the default filename extension for this kind of compression.
   *
   * @return <code>.zst</code>.
   */
  @Override
  public String getDefaultExtension() {
    return ".zst";
  }
  @Override
  public DirectDecompressor createDirectDecompressor() {
    return new ZStandardDecompressor.ZStandardDirectDecompressor(
        getDecompressionBufferSize(conf)
    );
  }
}
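A minimal sketch of using the codec directly, assuming the native zstd library is loaded; the output file name and payload are only examples:

  Configuration conf = new Configuration();
  ZStandardCodec codec = new ZStandardCodec();
  codec.setConf(conf);
  try (OutputStream raw =
           new java.io.FileOutputStream("data" + codec.getDefaultExtension());
       CompressionOutputStream out = codec.createOutputStream(raw)) {
    out.write("hello zstd".getBytes(java.nio.charset.StandardCharsets.UTF_8));
    out.finish();                             // flush the compressed frame before close
  }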
 
 | 
	apache_hadoop 
 | 
	2017-01-28 
 | 
	312b36d113d83640b92c62fdd91ede74bd04c00f 
 | 
					
	hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TempSchedulerNode.java 
 | 120 
							 | 
	/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode;
import org.apache.hadoop.yarn.util.resource.Resources;
import java.util.List;
/**
 * This class saves the necessary information copied from FiCaSchedulerNode.
 * It exists mainly for performance: the snapshot can be cached to avoid
 * hitting the scheduler again and again. In addition, preemption-specific
 * fields can be added to the class.
 */
public class TempSchedulerNode {
  private List<RMContainer> runningContainers;
  private RMContainer reservedContainer;
  private Resource totalResource;
  // allocated resource, excluding the reserved resource
  private Resource allocatedResource;
  // total - allocated
  private Resource availableResource;
  // just a shortcut for reservedContainer.getReservedResource().
  private Resource reservedResource;
  private NodeId nodeId;
  public static TempSchedulerNode fromSchedulerNode(
      FiCaSchedulerNode schedulerNode) {
    TempSchedulerNode n = new TempSchedulerNode();
    n.totalResource = Resources.clone(schedulerNode.getTotalResource());
    n.allocatedResource = Resources.clone(schedulerNode.getAllocatedResource());
    n.runningContainers = schedulerNode.getCopiedListOfRunningContainers();
    n.reservedContainer = schedulerNode.getReservedContainer();
    if (n.reservedContainer != null) {
      n.reservedResource = n.reservedContainer.getReservedResource();
    } else {
      n.reservedResource = Resources.none();
    }
    n.availableResource = Resources.subtract(n.totalResource,
        n.allocatedResource);
    n.nodeId = schedulerNode.getNodeID();
    return n;
  }
  public NodeId getNodeId() {
    return nodeId;
  }
  public List<RMContainer> getRunningContainers() {
    return runningContainers;
  }
  public void setRunningContainers(List<RMContainer> runningContainers) {
    this.runningContainers = runningContainers;
  }
  public RMContainer getReservedContainer() {
    return reservedContainer;
  }
  public void setReservedContainer(RMContainer reservedContainer) {
    this.reservedContainer = reservedContainer;
  }
  public Resource getTotalResource() {
    return totalResource;
  }
  public void setTotalResource(Resource totalResource) {
    this.totalResource = totalResource;
  }
  public Resource getAllocatedResource() {
    return allocatedResource;
  }
  public void setAllocatedResource(Resource allocatedResource) {
    this.allocatedResource = allocatedResource;
  }
  public Resource getAvailableResource() {
    return availableResource;
  }
  public void setAvailableResource(Resource availableResource) {
    this.availableResource = availableResource;
  }
  public Resource getReservedResource() {
    return reservedResource;
  }
  public void setReservedResource(Resource reservedResource) {
    this.reservedResource = reservedResource;
  }
}
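A short sketch of taking and querying a snapshot during preemption analysis; the schedulerNode variable is assumed to come from the capacity scheduler:

  TempSchedulerNode node = TempSchedulerNode.fromSchedulerNode(schedulerNode);
  Resource headroom = node.getAvailableResource();       // total - allocated, fixed at snapshot time
  Resource reservedDemand = node.getReservedResource();  // Resources.none() when nothing is reserved
  Resource idealFree = Resources.add(headroom, reservedDemand);  // space if the reservation were cancelled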
 
 | 
	apache_hadoop 
 | 
	2017-01-28 
 | 
	312b36d113d83640b92c62fdd91ede74bd04c00f 
 | 
					
	hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/PerContainerLogFileInfo.java 
 | 93 
							 | 
	/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.yarn.logaggregation;
/**
 * PerContainerLogFileInfo represents the metadata for a container log file,
 * which includes:
 * <ul>
 *   <li>The filename of the container log.</li>
 *   <li>The size of the container log.</li>
 *   <li>The last modification time of the container log.</li>
 * </ul>
 *
 */
public class PerContainerLogFileInfo {
  private String fileName;
  private String fileSize;
  private String lastModifiedTime;
  //JAXB needs this
  public PerContainerLogFileInfo() {}
  public PerContainerLogFileInfo(String fileName, String fileSize,
      String lastModifiedTime) {
    this.setFileName(fileName);
    this.setFileSize(fileSize);
    this.setLastModifiedTime(lastModifiedTime);
  }
  public String getFileName() {
    return fileName;
  }
  public void setFileName(String fileName) {
    this.fileName = fileName;
  }
  public String getFileSize() {
    return fileSize;
  }
  public void setFileSize(String fileSize) {
    this.fileSize = fileSize;
  }
  public String getLastModifiedTime() {
    return lastModifiedTime;
  }
  public void setLastModifiedTime(String lastModifiedTime) {
    this.lastModifiedTime = lastModifiedTime;
  }
  @Override
  public int hashCode() {
    final int prime = 31;
    int result = 1;
    result = prime * result + ((fileName == null) ? 0 : fileName.hashCode());
    result = prime * result + ((fileSize == null) ? 0 : fileSize.hashCode());
    result = prime * result + ((lastModifiedTime == null) ?
        0 : lastModifiedTime.hashCode());
    return result;
  }
  @Override
  public boolean equals(Object otherObj) {
    if (otherObj == this) {
      return true;
    }
    if (!(otherObj instanceof PerContainerLogFileInfo)) {
      return false;
    }
    PerContainerLogFileInfo other = (PerContainerLogFileInfo)otherObj;
    return other.fileName.equals(fileName) && other.fileSize.equals(fileSize)
        && other.lastModifiedTime.equals(lastModifiedTime);
  }
}
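Because equals() and hashCode() are overridden, equal file entries collapse in hash-based collections; a small illustrative sketch with made-up values:

  PerContainerLogFileInfo a =
      new PerContainerLogFileInfo("syslog", "2048", "Sat Jan 28 00:00:00 UTC 2017");
  PerContainerLogFileInfo b =
      new PerContainerLogFileInfo("syslog", "2048", "Sat Jan 28 00:00:00 UTC 2017");
  java.util.Set<PerContainerLogFileInfo> metas = new java.util.HashSet<>();
  metas.add(a);
  metas.add(b);                  // equal to 'a', so the set still holds a single entry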
 
 | 
	apache_hadoop 
 | 
	2017-01-28 
 | 
	312b36d113d83640b92c62fdd91ede74bd04c00f 
 | 
					
	hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/ContainerLogsInfo.java 
 | 87 
							 | 
	/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.yarn.server.webapp.dao;
import java.util.ArrayList;
import java.util.List;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.XmlRootElement;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.logaggregation.ContainerLogMeta;
import org.apache.hadoop.yarn.logaggregation.ContainerLogType;
import org.apache.hadoop.yarn.logaggregation.PerContainerLogFileInfo;
/**
 * {@code ContainerLogsInfo} includes the log meta-data of containers.
 * <p>
 * The container log meta-data includes details such as:
 * <ul>
 *   <li>A list of {@link PerContainerLogFileInfo}.</li>
 *   <li>The container Id.</li>
 *   <li>The NodeManager Id.</li>
 *   <li>The log type: either local or aggregated.</li>
 * </ul>
 */
@XmlRootElement(name = "containerLogsInfo")
@XmlAccessorType(XmlAccessType.FIELD)
public class ContainerLogsInfo {
  @XmlElement(name = "containerLogInfo")
  protected List<PerContainerLogFileInfo> containerLogsInfo;
  @XmlElement(name = "logType")
  protected String logType;
  @XmlElement(name = "containerId")
  protected String containerId;
  @XmlElement(name = "nodeId")
  protected String nodeId;
  //JAXB needs this
  public ContainerLogsInfo() {}
  public ContainerLogsInfo(ContainerLogMeta logMeta, ContainerLogType logType)
      throws YarnException {
    this.containerLogsInfo = new ArrayList<PerContainerLogFileInfo>(
        logMeta.getContainerLogMeta());
    this.logType = logType.toString();
    this.containerId = logMeta.getContainerId();
    this.nodeId = logMeta.getNodeId();
  }
  public List<PerContainerLogFileInfo> getContainerLogsInfo() {
    return this.containerLogsInfo;
  }
  public String getLogType() {
    return this.logType;
  }
  public String getContainerId() {
    return this.containerId;
  }
  public String getNodeId() {
    return this.nodeId;
  }
}
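Since the class is annotated for JAXB, it can be marshalled to XML with the standard JAXB API; a minimal sketch (no Hadoop-specific serializer is implied):

  javax.xml.bind.JAXBContext ctx =
      javax.xml.bind.JAXBContext.newInstance(ContainerLogsInfo.class);
  javax.xml.bind.Marshaller marshaller = ctx.createMarshaller();
  marshaller.setProperty(javax.xml.bind.Marshaller.JAXB_FORMATTED_OUTPUT, true);
  marshaller.marshal(new ContainerLogsInfo(), System.out);   // emits <containerLogsInfo/>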
 
 | 
	apache_hadoop 
 | 
	2017-01-28 
 | 
	312b36d113d83640b92c62fdd91ede74bd04c00f 
 | 
					
	hadoop-tools/hadoop-azure-datalake/src/test/java/org/apache/hadoop/fs/adl/TestAzureADTokenProvider.java 
 | 153 
							 | 
	/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.fs.adl;
import java.io.File;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import org.apache.commons.lang.builder.EqualsBuilder;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.adl.common.CustomMockTokenProvider;
import org.apache.hadoop.fs.adl.oauth2.AzureADTokenProvider;
import com.microsoft.azure.datalake.store.oauth2.AccessTokenProvider;
import com.microsoft.azure.datalake.store.oauth2.ClientCredsTokenProvider;
import com.microsoft.azure.datalake.store.oauth2.RefreshTokenBasedTokenProvider;
import static org.apache.hadoop.fs.adl.AdlConfKeys.AZURE_AD_CLIENT_ID_KEY;
import static org.apache.hadoop.fs.adl.AdlConfKeys.AZURE_AD_CLIENT_SECRET_KEY;
import static org.apache.hadoop.fs.adl.AdlConfKeys.AZURE_AD_REFRESH_TOKEN_KEY;
import static org.apache.hadoop.fs.adl.AdlConfKeys.AZURE_AD_REFRESH_URL_KEY;
import static org.apache.hadoop.fs.adl.AdlConfKeys
    .AZURE_AD_TOKEN_PROVIDER_CLASS_KEY;
import static org.apache.hadoop.fs.adl.AdlConfKeys
    .AZURE_AD_TOKEN_PROVIDER_TYPE_KEY;
import static org.apache.hadoop.fs.adl.TokenProviderType.*;
import static org.junit.Assert.assertEquals;
import org.apache.hadoop.security.ProviderUtils;
import org.apache.hadoop.security.alias.CredentialProvider;
import org.apache.hadoop.security.alias.CredentialProviderFactory;
import org.junit.Assert;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.TemporaryFolder;
/**
 * Test that the appropriate token provider is loaded as per configuration.
 */
public class TestAzureADTokenProvider {
  private static final String CLIENT_ID = "MY_CLIENT_ID";
  private static final String REFRESH_TOKEN = "MY_REFRESH_TOKEN";
  private static final String CLIENT_SECRET = "MY_CLIENT_SECRET";
  private static final String REFRESH_URL = "http://localhost:8080/refresh";
  @Rule
  public final TemporaryFolder tempDir = new TemporaryFolder();
  @Test
  public void testRefreshTokenProvider()
      throws URISyntaxException, IOException {
    Configuration conf = new Configuration();
    conf.set(AZURE_AD_CLIENT_ID_KEY, "MY_CLIENTID");
    conf.set(AZURE_AD_REFRESH_TOKEN_KEY, "XYZ");
    conf.setEnum(AZURE_AD_TOKEN_PROVIDER_TYPE_KEY, RefreshToken);
    conf.set(AZURE_AD_REFRESH_URL_KEY, "http://localhost:8080/refresh");
    URI uri = new URI("adl://localhost:8080");
    AdlFileSystem fileSystem = new AdlFileSystem();
    fileSystem.initialize(uri, conf);
    AccessTokenProvider tokenProvider = fileSystem.getTokenProvider();
    Assert.assertTrue(tokenProvider instanceof RefreshTokenBasedTokenProvider);
  }
  @Test
  public void testClientCredTokenProvider()
      throws IOException, URISyntaxException {
    Configuration conf = new Configuration();
    conf.set(AZURE_AD_CLIENT_ID_KEY, "MY_CLIENTID");
    conf.set(AZURE_AD_CLIENT_SECRET_KEY, "XYZ");
    conf.setEnum(AZURE_AD_TOKEN_PROVIDER_TYPE_KEY, ClientCredential);
    conf.set(AZURE_AD_REFRESH_URL_KEY, "http://localhost:8080/refresh");
    URI uri = new URI("adl://localhost:8080");
    AdlFileSystem fileSystem = new AdlFileSystem();
    fileSystem.initialize(uri, conf);
    AccessTokenProvider tokenProvider = fileSystem.getTokenProvider();
    Assert.assertTrue(tokenProvider instanceof ClientCredsTokenProvider);
  }
  @Test
  public void testCustomCredTokenProvider()
      throws URISyntaxException, IOException {
    Configuration conf = new Configuration();
    conf.setClass(AZURE_AD_TOKEN_PROVIDER_CLASS_KEY,
        CustomMockTokenProvider.class, AzureADTokenProvider.class);
    URI uri = new URI("adl://localhost:8080");
    AdlFileSystem fileSystem = new AdlFileSystem();
    fileSystem.initialize(uri, conf);
    AccessTokenProvider tokenProvider = fileSystem.getTokenProvider();
    Assert.assertTrue(tokenProvider instanceof SdkTokenProviderAdapter);
  }
  @Test
  public void testInvalidProviderConfigurationForType()
      throws URISyntaxException, IOException {
    Configuration conf = new Configuration();
    URI uri = new URI("adl://localhost:8080");
    AdlFileSystem fileSystem = new AdlFileSystem();
    try {
      fileSystem.initialize(uri, conf);
      Assert.fail("Initialization should have failed due no token provider "
          + "configuration");
    } catch (IllegalArgumentException e) {
      Assert.assertTrue(
          e.getMessage().contains("dfs.adls.oauth2.access.token.provider"));
    }
    conf.setClass(AZURE_AD_TOKEN_PROVIDER_CLASS_KEY,
        CustomMockTokenProvider.class, AzureADTokenProvider.class);
    fileSystem.initialize(uri, conf);
  }
  @Test
  public void testInvalidProviderConfigurationForClassPath()
      throws URISyntaxException, IOException {
    Configuration conf = new Configuration();
    URI uri = new URI("adl://localhost:8080");
    AdlFileSystem fileSystem = new AdlFileSystem();
    conf.set(AZURE_AD_TOKEN_PROVIDER_CLASS_KEY,
        "wrong.classpath.CustomMockTokenProvider");
    try {
      fileSystem.initialize(uri, conf);
      Assert.fail("Initialization should have failed due invalid provider "
          + "configuration");
    } catch (RuntimeException e) {
      Assert.assertTrue(
          e.getMessage().contains("wrong.classpath.CustomMockTokenProvider"));
    }
  }
  private CredentialProvider createTempCredProvider(Configuration conf)
      throws URISyntaxException, IOException {
    final File file = tempDir.newFile("test.jks");
    final URI jks = ProviderUtils.nestURIForLocalJavaKeyStoreProvider(
        file.toURI());
    conf.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH,
        jks.toString());
    return CredentialProviderFactory.getProviders(conf).get(0);
  }
  @Test
  public void testRefreshTokenWithCredentialProvider()
      throws IOException, URISyntaxException {
    Configuration conf = new Configuration();
    conf.set(AZURE_AD_CLIENT_ID_KEY, "DUMMY");
    conf.set(AZURE_AD_REFRESH_TOKEN_KEY, "DUMMY");
    conf.setEnum(AZURE_AD_TOKEN_PROVIDER_TYPE_KEY, RefreshToken);
    CredentialProvider provider = createTempCredProvider(conf);
    provider.createCredentialEntry(AZURE_AD_CLIENT_ID_KEY,
        CLIENT_ID.toCharArray());
    provider.createCredentialEntry(AZURE_AD_REFRESH_TOKEN_KEY,
        REFRESH_TOKEN.toCharArray());
    provider.flush();
    URI uri = new URI("adl://localhost:8080");
    AdlFileSystem fileSystem = new AdlFileSystem();
    fileSystem.initialize(uri, conf);
    RefreshTokenBasedTokenProvider expected =
        new RefreshTokenBasedTokenProvider(CLIENT_ID, REFRESH_TOKEN);
    Assert.assertTrue(EqualsBuilder.reflectionEquals(expected,
        fileSystem.getTokenProvider()));
  }
  @Test
  public void testRefreshTokenWithCredentialProviderFallback()
      throws IOException, URISyntaxException {
    Configuration conf = new Configuration();
    conf.set(AZURE_AD_CLIENT_ID_KEY, CLIENT_ID);
    conf.set(AZURE_AD_REFRESH_TOKEN_KEY, REFRESH_TOKEN);
    conf.setEnum(AZURE_AD_TOKEN_PROVIDER_TYPE_KEY, RefreshToken);
    createTempCredProvider(conf);
    URI uri = new URI("adl://localhost:8080");
    AdlFileSystem fileSystem = new AdlFileSystem();
    fileSystem.initialize(uri, conf);
    RefreshTokenBasedTokenProvider expected =
        new RefreshTokenBasedTokenProvider(CLIENT_ID, REFRESH_TOKEN);
    Assert.assertTrue(EqualsBuilder.reflectionEquals(expected,
        fileSystem.getTokenProvider()));
  }
  @Test
  public void testClientCredWithCredentialProvider()
      throws IOException, URISyntaxException {
    Configuration conf = new Configuration();
    conf.set(AZURE_AD_CLIENT_ID_KEY, "DUMMY");
    conf.set(AZURE_AD_CLIENT_SECRET_KEY, "DUMMY");
    conf.set(AZURE_AD_REFRESH_URL_KEY, "DUMMY");
    conf.setEnum(AZURE_AD_TOKEN_PROVIDER_TYPE_KEY, ClientCredential);
    CredentialProvider provider = createTempCredProvider(conf);
    provider.createCredentialEntry(AZURE_AD_CLIENT_ID_KEY,
        CLIENT_ID.toCharArray());
    provider.createCredentialEntry(AZURE_AD_CLIENT_SECRET_KEY,
        CLIENT_SECRET.toCharArray());
    provider.createCredentialEntry(AZURE_AD_REFRESH_URL_KEY,
        REFRESH_URL.toCharArray());
    provider.flush();
    URI uri = new URI("adl://localhost:8080");
    AdlFileSystem fileSystem = new AdlFileSystem();
    fileSystem.initialize(uri, conf);
    ClientCredsTokenProvider expected = new ClientCredsTokenProvider(
        REFRESH_URL, CLIENT_ID, CLIENT_SECRET);
    Assert.assertTrue(EqualsBuilder.reflectionEquals(expected,
        fileSystem.getTokenProvider()));
  }
  @Test
  public void testClientCredWithCredentialProviderFallback()
      throws IOException, URISyntaxException {
    Configuration conf = new Configuration();
    conf.set(AZURE_AD_CLIENT_ID_KEY, CLIENT_ID);
    conf.set(AZURE_AD_CLIENT_SECRET_KEY, CLIENT_SECRET);
    conf.set(AZURE_AD_REFRESH_URL_KEY, REFRESH_URL);
    conf.setEnum(AZURE_AD_TOKEN_PROVIDER_TYPE_KEY, ClientCredential);
    createTempCredProvider(conf);
    URI uri = new URI("adl://localhost:8080");
    AdlFileSystem fileSystem = new AdlFileSystem();
    fileSystem.initialize(uri, conf);
    ClientCredsTokenProvider expected = new ClientCredsTokenProvider(
        REFRESH_URL, CLIENT_ID, CLIENT_SECRET);
    Assert.assertTrue(EqualsBuilder.reflectionEquals(expected,
        fileSystem.getTokenProvider()));
  }
  @Test
  public void testCredentialProviderPathExclusions() throws Exception {
    String providerPath =
        "user:///,jceks://adl/user/hrt_qa/sqoopdbpasswd.jceks," +
            "jceks://[email protected]/my/path/test.jceks";
    Configuration config = new Configuration();
    config.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH,
        providerPath);
    String newPath =
        "user:///,jceks://[email protected]/my/path/test.jceks";
    excludeAndTestExpectations(config, newPath);
  }
  @Test
  public void testExcludeAllProviderTypesFromConfig() throws Exception {
    String providerPath =
        "jceks://adl/tmp/test.jceks," +
            "jceks://adl@/my/path/test.jceks";
    Configuration config = new Configuration();
    config.set(CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH,
        providerPath);
    String newPath = null;
    excludeAndTestExpectations(config, newPath);
  }
  void excludeAndTestExpectations(Configuration config, String newPath)
      throws Exception {
    Configuration conf = ProviderUtils.excludeIncompatibleCredentialProviders(
        config, AdlFileSystem.class);
    String effectivePath = conf.get(
        CredentialProviderFactory.CREDENTIAL_PROVIDER_PATH, null);
    assertEquals(newPath, effectivePath);
  }
}
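Outside of these tests, the same keys select the token provider for a real adl:// URI; a hedged sketch where the account, tenant and credentials are placeholders:

  Configuration conf = new Configuration();
  conf.setEnum(AZURE_AD_TOKEN_PROVIDER_TYPE_KEY, ClientCredential);
  conf.set(AZURE_AD_CLIENT_ID_KEY, "<client-id>");
  conf.set(AZURE_AD_CLIENT_SECRET_KEY, "<client-secret>");
  conf.set(AZURE_AD_REFRESH_URL_KEY,
      "https://login.microsoftonline.com/<tenant>/oauth2/token");
  AdlFileSystem fs = new AdlFileSystem();
  fs.initialize(new URI("adl://<account>.azuredatalakestore.net"), conf);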
 
 | 
	apache_hadoop 
 | 
	2017-01-28 
 | 
	312b36d113d83640b92c62fdd91ede74bd04c00f 
 | 
					
	hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestSlowPeerTracker.java 
 | 226 
							 | 
	/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 * <p>
 * http://www.apache.org/licenses/LICENSE-2.0
 * <p>
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.server.blockmanagement;
import com.fasterxml.jackson.core.type.TypeReference;
import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.server.blockmanagement.SlowPeerTracker.ReportForJson;
import org.apache.hadoop.util.FakeTimer;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
import org.junit.rules.Timeout;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.io.IOException;
import java.util.Set;
import static org.hamcrest.core.Is.is;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.assertTrue;
/**
 * Tests for {@link SlowPeerTracker}.
 */
public class TestSlowPeerTracker {
  public static final Logger LOG = LoggerFactory.getLogger(
      TestSlowPeerTracker.class);
  /**
   * Set a timeout for every test case.
   */
  @Rule
  public Timeout testTimeout = new Timeout(300_000);
  private Configuration conf;
  private SlowPeerTracker tracker;
  private FakeTimer timer;
  private long reportValidityMs;
  @Before
  public void setup() {
    conf = new HdfsConfiguration();
    timer = new FakeTimer();
    tracker = new SlowPeerTracker(conf, timer);
    reportValidityMs = tracker.getReportValidityMs();
  }
  /**
   * Edge case, there are no reports to retrieve.
   */
  @Test
  public void testEmptyReports() {
    assertTrue(tracker.getReportsForAllDataNodes().isEmpty());
    assertTrue(tracker.getReportsForNode("noSuchNode").isEmpty());
  }
  @Test
  public void testReportsAreRetrieved() {
    tracker.addReport("node2", "node1");
    tracker.addReport("node3", "node1");
    tracker.addReport("node3", "node2");
    assertThat(tracker.getReportsForAllDataNodes().size(), is(2));
    assertThat(tracker.getReportsForNode("node2").size(), is(1));
    assertThat(tracker.getReportsForNode("node3").size(), is(2));
    assertThat(tracker.getReportsForNode("node1").size(), is(0));
  }
  /**
   * Test that when all reports are expired, we get back nothing.
   */
  @Test
  public void testAllReportsAreExpired() {
    tracker.addReport("node2", "node1");
    tracker.addReport("node3", "node2");
    tracker.addReport("node1", "node3");
    // No reports should expire after 1ms.
    timer.advance(1);
    assertThat(tracker.getReportsForAllDataNodes().size(), is(3));
    // All reports should expire after REPORT_VALIDITY_MS.
    timer.advance(reportValidityMs);
    assertTrue(tracker.getReportsForAllDataNodes().isEmpty());
    assertTrue(tracker.getReportsForNode("node1").isEmpty());
    assertTrue(tracker.getReportsForNode("node2").isEmpty());
    assertTrue(tracker.getReportsForNode("node3").isEmpty());
  }
  /**
   * Test the case when a subset of reports has expired.
   * Ensure that we only get back non-expired reports.
   */
  @Test
  public void testSomeReportsAreExpired() {
    tracker.addReport("node3", "node1");
    tracker.addReport("node3", "node2");
    timer.advance(reportValidityMs);
    tracker.addReport("node3", "node4");
    assertThat(tracker.getReportsForAllDataNodes().size(), is(1));
    assertThat(tracker.getReportsForNode("node3").size(), is(1));
    assertTrue(tracker.getReportsForNode("node3").contains("node4"));
  }
  /**
   * Test the case when an expired report is replaced by a valid one.
   */
  @Test
  public void testReplacement() {
    tracker.addReport("node2", "node1");
    timer.advance(reportValidityMs); // Expire the report.
    assertThat(tracker.getReportsForAllDataNodes().size(), is(0));
    // This should replace the expired report with a newer valid one.
    tracker.addReport("node2", "node1");
    assertThat(tracker.getReportsForAllDataNodes().size(), is(1));
    assertThat(tracker.getReportsForNode("node2").size(), is(1));
  }
  @Test
  public void testGetJson() throws IOException {
    tracker.addReport("node1", "node2");
    tracker.addReport("node2", "node3");
    tracker.addReport("node2", "node1");
    tracker.addReport("node4", "node1");
    final Set<ReportForJson> reports = getAndDeserializeJson();
    // And ensure its contents are what we expect.
    assertThat(reports.size(), is(3));
    assertTrue(isNodeInReports(reports, "node1"));
    assertTrue(isNodeInReports(reports, "node2"));
    assertTrue(isNodeInReports(reports, "node4"));
    assertFalse(isNodeInReports(reports, "node3"));
  }
  @Test
  public void testGetJsonSizeIsLimited() throws IOException {
    tracker.addReport("node1", "node2");
    tracker.addReport("node1", "node3");
    tracker.addReport("node2", "node3");
    tracker.addReport("node2", "node4");
    tracker.addReport("node3", "node4");
    tracker.addReport("node3", "node5");
    tracker.addReport("node4", "node6");
    tracker.addReport("node5", "node6");
    tracker.addReport("node5", "node7");
    tracker.addReport("node6", "node7");
    tracker.addReport("node6", "node8");
    final Set<ReportForJson> reports = getAndDeserializeJson();
    // Ensure that node4 is not in the list since it was
    // tagged by just one peer and we already have 5 other nodes.
    assertFalse(isNodeInReports(reports, "node4"));
    // Remaining nodes should be in the list.
    assertTrue(isNodeInReports(reports, "node1"));
    assertTrue(isNodeInReports(reports, "node2"));
    assertTrue(isNodeInReports(reports, "node3"));
    assertTrue(isNodeInReports(reports, "node5"));
    assertTrue(isNodeInReports(reports, "node6"));
  }
  @Test
  public void testLowRankedElementsIgnored() throws IOException {
    // Insert 5 nodes with 2 peer reports each.
    for (int i = 0; i < 5; ++i) {
      tracker.addReport("node" + i, "reporter1");
      tracker.addReport("node" + i, "reporter2");
    }
    // Insert 10 nodes with 1 peer report each.
    for (int i = 10; i < 20; ++i) {
      tracker.addReport("node" + i, "reporter1");
    }
    final Set<ReportForJson> reports = getAndDeserializeJson();
    // Ensure that only the first 5 nodes with two reports each were
    // included in the JSON.
    for (int i = 0; i < 5; ++i) {
      assertTrue(isNodeInReports(reports, "node" + i));
    }
  }
  private boolean isNodeInReports(
      Set<ReportForJson> reports, String node) {
    for (ReportForJson report : reports) {
      if (report.getSlowNode().equalsIgnoreCase(node)) {
        return true;
      }
    }
    return false;
  }
  private Set<ReportForJson> getAndDeserializeJson()
      throws IOException {
    final String json = tracker.getJson();
    LOG.info("Got JSON: {}", json);
    return (new ObjectMapper()).readValue(
        json, new TypeReference<Set<ReportForJson>>() {});
  }
}
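The tracker API exercised by these tests comes down to three calls; a compact sketch of the reporting flow with placeholder node names:

  SlowPeerTracker tracker = new SlowPeerTracker(new HdfsConfiguration(), new FakeTimer());
  tracker.addReport("dn3", "dn1");               // dn1 reports dn3 as slow
  tracker.addReport("dn3", "dn2");               // dn2 reports dn3 as slow
  assert tracker.getReportsForNode("dn3").size() == 2;
  String json = tracker.getJson();               // JSON view of the most-reported nodes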
 
 | 
	apache_hadoop 
 | 
	2017-01-28 
 | 
	312b36d113d83640b92c62fdd91ede74bd04c00f 
 | 
					
	hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestContainerLogsUtils.java 
 | 122 
							 | 
	/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.yarn.logaggregation;
import static org.junit.Assert.assertTrue;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
import java.io.Writer;
import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
/**
 * This class contains several utility functions for log aggregation tests.
 */
public final class TestContainerLogsUtils {
  private TestContainerLogsUtils() {}
  /**
   * Utility function to create a container log file and upload
   * it into the remote file system.
   * @param conf the configuration
   * @param fs the FileSystem
   * @param rootLogDir the root log directory
   * @param containerId the containerId
   * @param nodeId the nodeId
   * @param fileName the log file name
   * @param user the application user
   * @param content the log content
   * @param deleteRemoteLogDir whether to delete the existing remote log dir.
   * @throws IOException if we cannot create the log files locally
   *         or cannot upload the container logs into the remote file system.
   */
  public static void createContainerLogFileInRemoteFS(Configuration conf,
      FileSystem fs, String rootLogDir, ContainerId containerId, NodeId nodeId,
      String fileName, String user, String content,
      boolean deleteRemoteLogDir) throws IOException {
    UserGroupInformation ugi = UserGroupInformation.createRemoteUser(user);
    //prepare the logs for remote directory
    ApplicationId appId = containerId.getApplicationAttemptId()
        .getApplicationId();
    // create local logs
    List<String> rootLogDirList = new ArrayList<String>();
    rootLogDirList.add(rootLogDir);
    Path rootLogDirPath = new Path(rootLogDir);
    if (fs.exists(rootLogDirPath)) {
      fs.delete(rootLogDirPath, true);
    }
    assertTrue(fs.mkdirs(rootLogDirPath));
    Path appLogsDir = new Path(rootLogDirPath, appId.toString());
    if (fs.exists(appLogsDir)) {
      fs.delete(appLogsDir, true);
    }
    assertTrue(fs.mkdirs(appLogsDir));
    createContainerLogInLocalDir(appLogsDir, containerId, fs, fileName,
        content);
    // upload container logs to remote log dir
    Path path = new Path(conf.get(YarnConfiguration.NM_REMOTE_APP_LOG_DIR),
        user + "/logs/" + appId.toString());
    if (fs.exists(path) && deleteRemoteLogDir) {
      fs.delete(path, true);
    }
    assertTrue(fs.mkdirs(path));
    uploadContainerLogIntoRemoteDir(ugi, conf, rootLogDirList, nodeId,
        containerId, path, fs);
  }
  private static void createContainerLogInLocalDir(Path appLogsDir,
      ContainerId containerId, FileSystem fs, String fileName, String content)
      throws IOException{
    Path containerLogsDir = new Path(appLogsDir, containerId.toString());
    if (fs.exists(containerLogsDir)) {
      fs.delete(containerLogsDir, true);
    }
    assertTrue(fs.mkdirs(containerLogsDir));
    Writer writer =
        new FileWriter(new File(containerLogsDir.toString(), fileName));
    writer.write(content);
    writer.close();
  }
  private static void uploadContainerLogIntoRemoteDir(UserGroupInformation ugi,
      Configuration configuration, List<String> rootLogDirs, NodeId nodeId,
      ContainerId containerId, Path appDir, FileSystem fs) throws IOException {
    Path path =
        new Path(appDir, LogAggregationUtils.getNodeString(nodeId));
    AggregatedLogFormat.LogWriter writer =
        new AggregatedLogFormat.LogWriter(configuration, path, ugi);
    writer.writeApplicationOwner(ugi.getUserName());
    writer.append(new AggregatedLogFormat.LogKey(containerId),
        new AggregatedLogFormat.LogValue(rootLogDirs, containerId,
        ugi.getShortUserName()));
    writer.close();
  }
}
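A hedged sketch of how a test might call the helper above; the ids, paths, user and content are hypothetical, and the YARN record factories used are the standard newInstance helpers:

  Configuration conf = new YarnConfiguration();
  conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR, "/tmp/remote-logs");
  FileSystem fs = FileSystem.get(conf);
  ApplicationId appId = ApplicationId.newInstance(System.currentTimeMillis(), 1);
  ContainerId containerId = ContainerId.newContainerId(
      org.apache.hadoop.yarn.api.records.ApplicationAttemptId.newInstance(appId, 1), 1);
  NodeId nodeId = NodeId.newInstance("localhost", 1234);
  TestContainerLogsUtils.createContainerLogFileInRemoteFS(conf, fs, "/tmp/local-logs",
      containerId, nodeId, "syslog", "test-user", "hello logs", true);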
 
 | 
	apache_hadoop 
 | 
	2017-01-28 
 | 
	312b36d113d83640b92c62fdd91ede74bd04c00f 
 | 
					