code | repo_name | path | language | license | size |
---|---|---|---|---|---|
#!/bin/bash
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
. "$DIR/.gr8_env.sh"
if [ "$#" -ne 2 ]; then
  echo "Usage: $(basename "$0") job_id {success_status|job_id|run_status}"
  exit 1
fi
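# Example invocation (job id is illustrative): ./gr8_statsJob.sh 1234 run_status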
curl -s -X GET \
-H "X-Auth-Token: $GR8_BI_TOK" \
$GR8_BASE_URL/current/$GR8_BI_ACT/job/$1/stats/$2 | tee $DIR/.lastresponse
| nagoodman/gr8bi_extras | cli/gr8_statsJob.sh | Shell | apache-2.0 | 330 |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import webob
from cinder.api.contrib import volume_type_access as type_access
from cinder.api.v2 import types as types_api_v2
from cinder import context
from cinder import db
from cinder import exception
from cinder import test
from cinder.tests.unit.api import fakes
from cinder.tests.unit import fake_constants as fake
def generate_type(type_id, is_public):
return {
'id': type_id,
'name': u'test',
'deleted': False,
'created_at': datetime.datetime(2012, 1, 1, 1, 1, 1, 1),
'updated_at': None,
'deleted_at': None,
'is_public': bool(is_public)
}
VOLUME_TYPES = {
fake.VOLUME_TYPE_ID: generate_type(fake.VOLUME_TYPE_ID, True),
fake.VOLUME_TYPE2_ID: generate_type(fake.VOLUME_TYPE2_ID, True),
fake.VOLUME_TYPE3_ID: generate_type(fake.VOLUME_TYPE3_ID, False),
fake.VOLUME_TYPE4_ID: generate_type(fake.VOLUME_TYPE4_ID, False)}
PROJ1_UUID = fake.PROJECT_ID
PROJ2_UUID = fake.PROJECT2_ID
PROJ3_UUID = fake.PROJECT3_ID
ACCESS_LIST = [{'volume_type_id': fake.VOLUME_TYPE3_ID,
'project_id': PROJ2_UUID},
{'volume_type_id': fake.VOLUME_TYPE3_ID,
'project_id': PROJ3_UUID},
{'volume_type_id': fake.VOLUME_TYPE4_ID,
'project_id': PROJ3_UUID}]
def fake_volume_type_get(context, id, inactive=False, expected_fields=None):
vol = VOLUME_TYPES[id]
if expected_fields and 'projects' in expected_fields:
vol['projects'] = [a['project_id']
for a in ACCESS_LIST if a['volume_type_id'] == id]
return vol
def _has_type_access(type_id, project_id):
for access in ACCESS_LIST:
if access['volume_type_id'] == type_id and \
access['project_id'] == project_id:
return True
return False
def fake_volume_type_get_all(context, inactive=False, filters=None,
marker=None, limit=None, sort_keys=None,
sort_dirs=None, offset=None, list_result=False):
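    # Mimics db.volume_type_get_all: is_public=None returns every type;
    # otherwise types are filtered by is_public, and private types the
    # requesting project has access to are also returned when is_public is True.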
if filters is None or filters['is_public'] is None:
if list_result:
return list(VOLUME_TYPES.values())
return VOLUME_TYPES
res = {}
for k, v in VOLUME_TYPES.items():
if filters['is_public'] and _has_type_access(k, context.project_id):
res.update({k: v})
continue
if v['is_public'] == filters['is_public']:
res.update({k: v})
if list_result:
return list(res.values())
return res
class FakeResponse(object):
obj = {'volume_type': {'id': fake.VOLUME_TYPE_ID},
'volume_types': [
{'id': fake.VOLUME_TYPE_ID},
{'id': fake.VOLUME_TYPE3_ID}]}
def attach(self, **kwargs):
pass
class FakeRequest(object):
environ = {"cinder.context": context.get_admin_context()}
def cached_resource_by_id(self, resource_id, name=None):
return VOLUME_TYPES[resource_id]
class VolumeTypeAccessTest(test.TestCase):
def setUp(self):
super(VolumeTypeAccessTest, self).setUp()
self.type_controller_v2 = types_api_v2.VolumeTypesController()
self.type_access_controller = type_access.VolumeTypeAccessController()
self.type_action_controller = type_access.VolumeTypeActionController()
self.req = FakeRequest()
self.context = self.req.environ['cinder.context']
self.stubs.Set(db, 'volume_type_get',
fake_volume_type_get)
self.stubs.Set(db, 'volume_type_get_all',
fake_volume_type_get_all)
def assertVolumeTypeListEqual(self, expected, observed):
self.assertEqual(len(expected), len(observed))
expected = sorted(expected, key=lambda item: item['id'])
observed = sorted(observed, key=lambda item: item['id'])
for d1, d2 in zip(expected, observed):
self.assertEqual(d1['id'], d2['id'])
def test_list_type_access_public(self):
"""Querying os-volume-type-access on public type should return 404."""
req = fakes.HTTPRequest.blank('/v2/%s/types/os-volume-type-access' %
fake.PROJECT_ID,
use_admin_context=True)
self.assertRaises(webob.exc.HTTPNotFound,
self.type_access_controller.index,
req, fake.VOLUME_TYPE2_ID)
def test_list_type_access_private(self):
expected = {'volume_type_access': [
{'volume_type_id': fake.VOLUME_TYPE3_ID,
'project_id': PROJ2_UUID},
{'volume_type_id': fake.VOLUME_TYPE3_ID,
'project_id': PROJ3_UUID}]}
result = self.type_access_controller.index(self.req,
fake.VOLUME_TYPE3_ID)
self.assertEqual(expected, result)
def test_list_with_no_context(self):
req = fakes.HTTPRequest.blank('/v2/flavors/%s/flavors' %
fake.PROJECT_ID)
def fake_authorize(context, target=None, action=None):
raise exception.PolicyNotAuthorized(action='index')
self.stubs.Set(type_access, 'authorize', fake_authorize)
self.assertRaises(exception.PolicyNotAuthorized,
self.type_access_controller.index,
req, fake.PROJECT_ID)
def test_list_type_with_admin_default_proj1(self):
expected = {'volume_types': [{'id': fake.VOLUME_TYPE_ID},
{'id': fake.VOLUME_TYPE2_ID}]}
req = fakes.HTTPRequest.blank('/v2/%s/types' % fake.PROJECT_ID,
use_admin_context=True)
req.environ['cinder.context'].project_id = PROJ1_UUID
result = self.type_controller_v2.index(req)
self.assertVolumeTypeListEqual(expected['volume_types'],
result['volume_types'])
def test_list_type_with_admin_default_proj2(self):
expected = {'volume_types': [{'id': fake.VOLUME_TYPE_ID},
{'id': fake.VOLUME_TYPE2_ID},
{'id': fake.VOLUME_TYPE3_ID}]}
req = fakes.HTTPRequest.blank('/v2/%s/types' % PROJ2_UUID,
use_admin_context=True)
req.environ['cinder.context'].project_id = PROJ2_UUID
result = self.type_controller_v2.index(req)
self.assertVolumeTypeListEqual(expected['volume_types'],
result['volume_types'])
def test_list_type_with_admin_ispublic_true(self):
expected = {'volume_types': [{'id': fake.VOLUME_TYPE_ID},
{'id': fake.VOLUME_TYPE2_ID}]}
req = fakes.HTTPRequest.blank('/v2/%s/types?is_public=true' %
fake.PROJECT_ID,
use_admin_context=True)
result = self.type_controller_v2.index(req)
self.assertVolumeTypeListEqual(expected['volume_types'],
result['volume_types'])
def test_list_type_with_admin_ispublic_false(self):
expected = {'volume_types': [{'id': fake.VOLUME_TYPE3_ID},
{'id': fake.VOLUME_TYPE4_ID}]}
req = fakes.HTTPRequest.blank('/v2/%s/types?is_public=false' %
fake.PROJECT_ID,
use_admin_context=True)
result = self.type_controller_v2.index(req)
self.assertVolumeTypeListEqual(expected['volume_types'],
result['volume_types'])
def test_list_type_with_admin_ispublic_false_proj2(self):
expected = {'volume_types': [{'id': fake.VOLUME_TYPE3_ID},
{'id': fake.VOLUME_TYPE4_ID}]}
req = fakes.HTTPRequest.blank('/v2/%s/types?is_public=false' %
fake.PROJECT_ID,
use_admin_context=True)
req.environ['cinder.context'].project_id = PROJ2_UUID
result = self.type_controller_v2.index(req)
self.assertVolumeTypeListEqual(expected['volume_types'],
result['volume_types'])
def test_list_type_with_admin_ispublic_none(self):
expected = {'volume_types': [{'id': fake.VOLUME_TYPE_ID},
{'id': fake.VOLUME_TYPE2_ID},
{'id': fake.VOLUME_TYPE3_ID},
{'id': fake.VOLUME_TYPE4_ID}]}
req = fakes.HTTPRequest.blank('/v2/%s/types?is_public=none' %
fake.PROJECT_ID,
use_admin_context=True)
result = self.type_controller_v2.index(req)
self.assertVolumeTypeListEqual(expected['volume_types'],
result['volume_types'])
def test_list_type_with_no_admin_default(self):
expected = {'volume_types': [{'id': fake.VOLUME_TYPE_ID},
{'id': fake.VOLUME_TYPE2_ID}]}
req = fakes.HTTPRequest.blank('/v2/%s/types' % fake.PROJECT_ID,
use_admin_context=False)
result = self.type_controller_v2.index(req)
self.assertVolumeTypeListEqual(expected['volume_types'],
result['volume_types'])
def test_list_type_with_no_admin_ispublic_true(self):
expected = {'volume_types': [{'id': fake.VOLUME_TYPE_ID},
{'id': fake.VOLUME_TYPE2_ID}]}
req = fakes.HTTPRequest.blank('/v2/%s/types?is_public=true' %
fake.PROJECT_ID,
use_admin_context=False)
result = self.type_controller_v2.index(req)
self.assertVolumeTypeListEqual(expected['volume_types'],
result['volume_types'])
def test_list_type_with_no_admin_ispublic_false(self):
expected = {'volume_types': [{'id': fake.VOLUME_TYPE_ID},
{'id': fake.VOLUME_TYPE2_ID}]}
req = fakes.HTTPRequest.blank('/v2/%s/types?is_public=false' %
fake.PROJECT_ID,
use_admin_context=False)
result = self.type_controller_v2.index(req)
self.assertVolumeTypeListEqual(expected['volume_types'],
result['volume_types'])
def test_list_type_with_no_admin_ispublic_none(self):
expected = {'volume_types': [{'id': fake.VOLUME_TYPE_ID},
{'id': fake.VOLUME_TYPE2_ID}]}
req = fakes.HTTPRequest.blank('/v2/%s/types?is_public=none' %
fake.PROJECT_ID,
use_admin_context=False)
result = self.type_controller_v2.index(req)
self.assertVolumeTypeListEqual(expected['volume_types'],
result['volume_types'])
def test_show(self):
resp = FakeResponse()
self.type_action_controller.show(self.req, resp, fake.VOLUME_TYPE_ID)
self.assertEqual({'id': fake.VOLUME_TYPE_ID,
'os-volume-type-access:is_public': True},
resp.obj['volume_type'])
def test_detail(self):
resp = FakeResponse()
self.type_action_controller.detail(self.req, resp)
self.assertEqual(
[{'id': fake.VOLUME_TYPE_ID,
'os-volume-type-access:is_public': True},
{'id': fake.VOLUME_TYPE3_ID,
'os-volume-type-access:is_public': False}],
resp.obj['volume_types'])
def test_create(self):
resp = FakeResponse()
self.type_action_controller.create(self.req, {}, resp)
self.assertEqual({'id': fake.VOLUME_TYPE_ID,
'os-volume-type-access:is_public': True},
resp.obj['volume_type'])
def test_add_project_access(self):
def stub_add_volume_type_access(context, type_id, project_id):
self.assertEqual(fake.VOLUME_TYPE4_ID, type_id, "type_id")
self.assertEqual(PROJ2_UUID, project_id, "project_id")
self.stubs.Set(db, 'volume_type_access_add',
stub_add_volume_type_access)
body = {'addProjectAccess': {'project': PROJ2_UUID}}
req = fakes.HTTPRequest.blank('/v2/%s/types/%s/action' % (
fake.PROJECT_ID, fake.VOLUME_TYPE3_ID),
use_admin_context=True)
result = self.type_action_controller._addProjectAccess(
req, fake.VOLUME_TYPE4_ID, body)
self.assertEqual(202, result.status_code)
def test_add_project_access_with_no_admin_user(self):
req = fakes.HTTPRequest.blank('/v2/%s/types/%s/action' % (
fake.PROJECT_ID, fake.VOLUME_TYPE3_ID),
use_admin_context=False)
body = {'addProjectAccess': {'project': PROJ2_UUID}}
self.assertRaises(exception.PolicyNotAuthorized,
self.type_action_controller._addProjectAccess,
req, fake.VOLUME_TYPE3_ID, body)
def test_add_project_access_with_already_added_access(self):
def stub_add_volume_type_access(context, type_id, project_id):
raise exception.VolumeTypeAccessExists(volume_type_id=type_id,
project_id=project_id)
self.stubs.Set(db, 'volume_type_access_add',
stub_add_volume_type_access)
body = {'addProjectAccess': {'project': PROJ2_UUID}}
req = fakes.HTTPRequest.blank('/v2/%s/types/%s/action' % (
fake.PROJECT_ID, fake.VOLUME_TYPE3_ID), use_admin_context=True)
self.assertRaises(webob.exc.HTTPConflict,
self.type_action_controller._addProjectAccess,
req, fake.VOLUME_TYPE3_ID, body)
def test_remove_project_access_with_bad_access(self):
def stub_remove_volume_type_access(context, type_id, project_id):
raise exception.VolumeTypeAccessNotFound(volume_type_id=type_id,
project_id=project_id)
self.stubs.Set(db, 'volume_type_access_remove',
stub_remove_volume_type_access)
body = {'removeProjectAccess': {'project': PROJ2_UUID}}
req = fakes.HTTPRequest.blank('/v2/%s/types/%s/action' % (
fake.PROJECT_ID, fake.VOLUME_TYPE3_ID), use_admin_context=True)
self.assertRaises(webob.exc.HTTPNotFound,
self.type_action_controller._removeProjectAccess,
req, fake.VOLUME_TYPE4_ID, body)
def test_remove_project_access_with_no_admin_user(self):
req = fakes.HTTPRequest.blank('/v2/%s/types/%s/action' % (
fake.PROJECT_ID, fake.VOLUME_TYPE3_ID), use_admin_context=False)
body = {'removeProjectAccess': {'project': PROJ2_UUID}}
self.assertRaises(exception.PolicyNotAuthorized,
self.type_action_controller._removeProjectAccess,
req, fake.VOLUME_TYPE3_ID, body)
| bswartz/cinder | cinder/tests/unit/api/contrib/test_volume_type_access.py | Python | apache-2.0 | 15,995 |
package com.ryanharter.auto.value.moshi.example;
import com.google.auto.value.AutoValue;
import com.squareup.moshi.JsonAdapter;
import com.squareup.moshi.Moshi;
import java.lang.reflect.Type;
@AutoValue public abstract class GenericsExample<A, B, C> {
public abstract A a();
public abstract B b();
public abstract C c();
@AutoValue.Builder
public interface Builder<A, B, C> {
Builder<A, B, C> a(A a);
Builder<A, B, C> b(B b);
Builder<A, B, C> c(C c);
GenericsExample<A, B, C> build();
}
public static <A, B, C> Builder<A, B, C> builder() {
return new AutoValue_GenericsExample.Builder<A, B, C>();
}
public static <A, B, C> JsonAdapter<GenericsExample<A, B, C>> jsonAdapter(Moshi moshi, Type[] types) {
return new AutoValue_GenericsExample.MoshiJsonAdapter(moshi, types);
}
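  // Usage sketch (type arguments are illustrative; in practice the generated
  // adapter is normally obtained through a Moshi JsonAdapter.Factory):
  //   Moshi moshi = new Moshi.Builder().build();
  //   Type[] types = {String.class, Integer.class, Boolean.class};
  //   JsonAdapter<GenericsExample<String, Integer, Boolean>> adapter =
  //       GenericsExample.jsonAdapter(moshi, types);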
}
| rharter/auto-value-moshi | example/src/main/java/com/ryanharter/auto/value/moshi/example/GenericsExample.java | Java | apache-2.0 | 870 |
# Copyright 2020 Department of Computational Biology for Infection Research - Helmholtz Centre for Infection Research
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
from src.utils import labels as utils_labels
from src.utils import load_ncbi_taxinfo
from src import binning_classes
import matplotlib
matplotlib.use('Agg')
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
import matplotlib.ticker as ticker
import numpy as np
import os, sys, inspect
import pandas as pd
from collections import OrderedDict
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
def create_colors_list():
colors_list = []
for color in plt.cm.tab10(np.linspace(0, 1, 10))[:-1]:
colors_list.append(tuple(color))
colors_list.append("black")
for color in plt.cm.Set2(np.linspace(0, 1, 8)):
colors_list.append(tuple(color))
for color in plt.cm.Set3(np.linspace(0, 1, 12)):
colors_list.append(tuple(color))
return colors_list
def create_legend(color_indices, available_tools, output_dir):
colors_list = create_colors_list()
if color_indices:
colors_list = [colors_list[i] for i in color_indices]
colors_iter = iter(colors_list)
circles = [Line2D([], [], markeredgewidth=0.0, linestyle="None", marker="o", markersize=10, markerfacecolor=next(colors_iter)) for label in available_tools]
fig = plt.figure(figsize=(0.5, 0.5))
fig.legend(circles, available_tools, loc='center', frameon=False, ncol=5, handletextpad=0.1)
fig.savefig(os.path.join(output_dir, 'genome', 'legend.pdf'), dpi=100, format='pdf', bbox_inches='tight')
plt.close(fig)
def plot_precision_vs_bin_size(pd_bins, output_dir):
pd_plot = pd_bins[pd_bins[utils_labels.TOOL] != utils_labels.GS]
for tool_label, pd_tool in pd_plot.groupby(utils_labels.TOOL):
fig, axs = plt.subplots(figsize=(5, 4.5))
axs.scatter(np.log(pd_tool['total_length']), pd_tool['precision_bp'], marker='o')
axs.set_xlim([None, np.log(pd_tool['total_length'].max())])
axs.set_ylim([0.0, 1.0])
axs.set_title(tool_label, fontsize=12)
plt.ylabel('Purity per bin (%)', fontsize=12)
plt.xlabel('Bin size [log(# bp)]', fontsize=12)
fig.savefig(os.path.join(output_dir, 'genome', tool_label, 'purity_vs_bin_size.png'), dpi=200, format='png', bbox_inches='tight')
plt.close(fig)
def plot_by_genome_coverage(pd_bins, pd_target_column, available_tools, output_dir):
colors_list = create_colors_list()
if len(available_tools) > len(colors_list):
raise RuntimeError("Plot only supports 29 colors")
fig, axs = plt.subplots(figsize=(5, 4.5))
for i, (color, tool) in enumerate(zip(colors_list, available_tools)):
pd_tool = pd_bins[pd_bins[utils_labels.TOOL] == tool].sort_values(by=['genome_index'])
axs.scatter(pd_tool['genome_coverage'], pd_tool[pd_target_column], marker='o', color=colors_list[i], s=[3] * pd_tool.shape[0])
window = 50
rolling_mean = pd_tool[pd_target_column].rolling(window=window, min_periods=10).mean()
axs.plot(pd_tool['genome_coverage'], rolling_mean, color=colors_list[i])
axs.set_ylim([-0.01, 1.01])
axs.set_xticklabels(['{:,.1f}'.format(np.exp(x)) for x in axs.get_xticks()], fontsize=12)
axs.set_yticklabels(['{:3.0f}'.format(x * 100) for x in axs.get_yticks()], fontsize=12)
axs.tick_params(axis='x', labelsize=12)
if pd_target_column == 'precision_bp':
ylabel = 'Purity per bin (%)'
file_name = 'purity_by_genome_coverage'
else:
ylabel = 'Completeness per genome (%)'
file_name = 'completeness_by_genome_coverage'
plt.ylabel(ylabel, fontsize=15)
plt.xlabel('Average genome coverage', fontsize=15)
colors_iter = iter(colors_list)
circles = []
for x in range(len(available_tools)):
circles.append(Line2D([], [], markeredgewidth=0.0, linestyle="None", marker="o", markersize=11, markerfacecolor=next(colors_iter)))
lgd = plt.legend(circles, available_tools, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0., handlelength=0, frameon=False, fontsize=14)
fig.savefig(os.path.join(output_dir, 'genome', file_name + '.pdf'), dpi=100, format='pdf', bbox_extra_artists=(lgd,), bbox_inches='tight')
plt.close(fig)
def get_pd_genomes_recall(sample_id_to_queries_list):
pd_genomes_recall = pd.DataFrame()
for sample_id in sample_id_to_queries_list:
for query in sample_id_to_queries_list[sample_id]:
if not isinstance(query, binning_classes.GenomeQuery):
continue
recall_df = query.recall_df_cami1[['genome_id', 'recall_bp']].copy()
recall_df[utils_labels.TOOL] = query.label
recall_df['sample_id'] = sample_id
recall_df = recall_df.reset_index().set_index(['sample_id', utils_labels.TOOL])
pd_genomes_recall = pd.concat([pd_genomes_recall, recall_df])
return pd_genomes_recall
def plot_precision_recall_by_coverage(sample_id_to_queries_list, pd_bins_g, coverages_pd, available_tools, output_dir):
# compute average genome coverage if coverages for multiple samples were provided
coverages_pd = coverages_pd.groupby(['GENOMEID']).mean()
coverages_pd = coverages_pd.sort_values(by=['COVERAGE'])
coverages_pd['rank'] = coverages_pd['COVERAGE'].rank()
pd_genomes_recall = get_pd_genomes_recall(sample_id_to_queries_list)
pd_genomes_recall['genome_index'] = pd_genomes_recall['genome_id'].map(coverages_pd['rank'].to_dict())
pd_genomes_recall = pd_genomes_recall.reset_index()
pd_genomes_recall['genome_coverage'] = np.log(pd_genomes_recall['genome_id'].map(coverages_pd['COVERAGE'].to_dict()))
plot_by_genome_coverage(pd_genomes_recall, 'recall_bp', available_tools, output_dir)
pd_bins_precision = pd_bins_g[[utils_labels.TOOL, 'precision_bp', 'genome_id']].copy().dropna(subset=['precision_bp'])
pd_bins_precision['genome_index'] = pd_bins_precision['genome_id'].map(coverages_pd['rank'].to_dict())
pd_bins_precision['genome_coverage'] = np.log(pd_bins_precision['genome_id'].map(coverages_pd['COVERAGE'].to_dict()))
plot_by_genome_coverage(pd_bins_precision, 'precision_bp', available_tools, output_dir)
def plot_heatmap(df_confusion, sample_id, output_dir, label, separate_bar=False, log_scale=False):
if log_scale:
        df_confusion = df_confusion.apply(np.log10).replace(-np.inf, 0)
fig, axs = plt.subplots(figsize=(10, 8))
fontsize = 20
    # replace column and row labels with numbers
d = {value: key for (key, value) in enumerate(df_confusion.columns.tolist(), 1)}
df_confusion = df_confusion.rename(index=str, columns=d)
df_confusion.index = range(1, len(df_confusion) + 1)
xticklabels = int(round(df_confusion.shape[1] / 10, -1))
yticklabels = int(round(df_confusion.shape[0] / 10, -1))
sns_plot = sns.heatmap(df_confusion, ax=axs, annot=False, cmap="YlGnBu_r", xticklabels=xticklabels, yticklabels=yticklabels, cbar=False, rasterized=True)
# sns_plot = sns.heatmap(df_confusion, ax=axs, annot=False, cmap="YlGnBu_r", xticklabels=False, yticklabels=False, cbar=True, rasterized=True)
sns_plot.set_xlabel("Genomes", fontsize=fontsize)
sns_plot.set_ylabel("Predicted bins", fontsize=fontsize)
plt.yticks(fontsize=12, rotation=0)
plt.xticks(fontsize=12)
mappable = sns_plot.get_children()[0]
cbar_ax = fig.add_axes([.915, .11, .017, .77])
cbar = plt.colorbar(mappable, ax=axs, cax=cbar_ax, orientation='vertical')
if log_scale:
cbar.set_label(fontsize=fontsize, label='log$_{10}$(# bp)')
else:
fmt = lambda x, pos: '{:.0f}'.format(x / 1000000)
cbar = plt.colorbar(mappable, ax=axs, cax=cbar_ax, orientation='vertical', format=ticker.FuncFormatter(fmt))
cbar.set_label(fontsize=fontsize, label='Millions of base pairs')
cbar.ax.tick_params(labelsize=fontsize)
cbar.outline.set_edgecolor(None)
axs.set_title(label, fontsize=fontsize, pad=10)
axs.set_ylim([len(df_confusion), 0])
# plt.yticks(fontsize=14, rotation=0)
# plt.xticks(fontsize=14)
output_dir = os.path.join(output_dir, 'genome', label)
fig.savefig(os.path.join(output_dir, 'heatmap_' + sample_id + '.pdf'), dpi=100, format='pdf', bbox_inches='tight')
fig.savefig(os.path.join(output_dir, 'heatmap_' + sample_id + '.png'), dpi=200, format='png', bbox_inches='tight')
plt.close(fig)
if not separate_bar:
return
# create separate figure for bar
fig = plt.figure(figsize=(6, 6))
mappable = sns_plot.get_children()[0]
fmt = lambda x, pos: '{:.0f}'.format(x / 1000000)
cbar = plt.colorbar(mappable, orientation='vertical', label='[millions of base pairs]', format=ticker.FuncFormatter(fmt))
text = cbar.ax.yaxis.label
font = matplotlib.font_manager.FontProperties(size=16)
text.set_font_properties(font)
cbar.outline.set_visible(False)
cbar.ax.tick_params(labelsize=14)
# store separate bar figure
plt.gca().set_visible(False)
fig.savefig(os.path.join(output_dir, 'heatmap_bar.pdf'), dpi=100, format='pdf', bbox_inches='tight')
plt.close(fig)
def plot_boxplot(sample_id_to_queries_list, metric_name, output_dir, available_tools):
pd_bins = pd.DataFrame()
for sample_id in sample_id_to_queries_list:
for query in sample_id_to_queries_list[sample_id]:
metric_df = getattr(query, metric_name.replace('_bp', '_df')).copy()
metric_df[utils_labels.TOOL] = query.label
metric_df['sample_id'] = sample_id
metric_df = metric_df.reset_index().set_index(['sample_id', utils_labels.TOOL])
pd_bins = pd.concat([pd_bins, metric_df])
metric_all = []
for tool in available_tools:
pd_tool = pd_bins.iloc[pd_bins.index.get_level_values(utils_labels.TOOL) == tool]
metric_all.append(pd_tool[metric_name][pd_tool[metric_name].notnull()].tolist())
fig, axs = plt.subplots(figsize=(6, 5))
medianprops = dict(linewidth=2.5, color='gold')
bplot = axs.boxplot(metric_all, notch=0, vert=0, patch_artist=True, labels=available_tools, medianprops=medianprops, sym='k.')
colors_iter = iter(create_colors_list())
# turn on grid
axs.grid(which='major', linestyle=':', linewidth='0.5', color='lightgrey')
# force axes to be from 0 to 100%
axs.set_xlim([-0.01, 1.01])
# transform plot_labels to percentages
vals = axs.get_xticks()
axs.set_xticklabels(['{:3.0f}'.format(x * 100) for x in vals])
# enable code to rotate labels
tick_labels = axs.get_yticklabels()
plt.setp(tick_labels, fontsize=13) ## rotation=55
for box in bplot['boxes']:
box.set(facecolor=next(colors_iter), linewidth=0.1)
plt.ylim(plt.ylim()[::-1])
if metric_name == 'precision_bp':
axs.set_xlabel('Purity per bin (%)', fontsize=13)
metric_name = 'purity_bp'
else:
axs.set_xlabel('Completeness per genome (%)', fontsize=13)
metric_name = 'completeness_bp'
fig.savefig(os.path.join(output_dir, 'genome', 'boxplot_' + metric_name + '.pdf'), dpi=100, format='pdf', bbox_inches='tight')
fig.savefig(os.path.join(output_dir, 'genome', 'boxplot_' + metric_name + '.png'), dpi=200, format='png', bbox_inches='tight')
# remove labels but keep grid
# axs.get_yaxis().set_ticklabels([])
# for tic in axs.yaxis.get_major_ticks():
# tic.tick1line.set_visible(False)
# tic.tick2line.set_visible(False)
# tic.label1.set_visible(False)
# tic.label2.set_visible(False)
# fig.savefig(os.path.join(output_dir, 'genome', 'boxplot_' + metric_name + '_wo_legend.pdf'), dpi=100, format='pdf', bbox_inches='tight')
plt.close(fig)
def plot_summary(color_indices, df_results, labels, output_dir, rank, plot_type, file_name, xlabel, ylabel):
available_tools = df_results[utils_labels.TOOL].unique()
tools = [tool for tool in labels if tool in available_tools]
colors_list = create_colors_list()
if color_indices:
colors_list = [colors_list[i] for i in color_indices]
df_mean = df_results.groupby(utils_labels.TOOL).mean().reindex(tools)
binning_type = df_results[utils_labels.BINNING_TYPE].iloc[0]
if len(df_mean) > len(colors_list):
raise RuntimeError("Plot only supports 29 colors")
fig, axs = plt.subplots(figsize=(5, 4.5))
# force axes to be from 0 to 100%
axs.set_xlim([0.0, 1.0])
axs.set_ylim([0.0, 1.0])
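    # plot_type codes: 'e'/'f' plot average purity vs. completeness (bp/seq)
    # with error bars, 'w'/'x' plot per-sample purity vs. completeness
    # (bp/seq), and 'p' plots ARI vs. percentage of assigned base pairs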
if plot_type == 'e':
for i, (tool, df_row) in enumerate(df_mean.iterrows()):
axs.errorbar(df_row[utils_labels.AVG_PRECISION_BP], df_row[utils_labels.AVG_RECALL_BP], xerr=df_row['avg_precision_bp_var'], yerr=df_row['avg_recall_bp_var'],
fmt='o',
ecolor=colors_list[i],
mec=colors_list[i],
mfc=colors_list[i],
capsize=3,
markersize=8)
if plot_type == 'f':
for i, (tool, df_row) in enumerate(df_mean.iterrows()):
axs.errorbar(df_row[utils_labels.AVG_PRECISION_SEQ], df_row[utils_labels.AVG_RECALL_SEQ], xerr=df_row[utils_labels.AVG_PRECISION_SEQ_SEM], yerr=df_row[utils_labels.AVG_RECALL_SEQ_SEM],
fmt='o',
ecolor=colors_list[i],
mec=colors_list[i],
mfc=colors_list[i],
capsize=3,
markersize=8)
if plot_type == 'w':
for i, (tool, df_row) in enumerate(df_mean.iterrows()):
axs.plot(df_row[utils_labels.PRECISION_PER_BP], df_row[utils_labels.RECALL_PER_BP], marker='o', color=colors_list[i], markersize=10)
if plot_type == 'x':
for i, (tool, df_row) in enumerate(df_mean.iterrows()):
axs.plot(df_row[utils_labels.PRECISION_PER_SEQ], df_row[utils_labels.RECALL_PER_SEQ], marker='o', color=colors_list[i], markersize=10)
elif plot_type == 'p':
for i, (tool, df_row) in enumerate(df_mean.iterrows()):
axs.plot(df_row[utils_labels.ARI_BY_BP], df_row[utils_labels.PERCENTAGE_ASSIGNED_BPS], marker='o', color=colors_list[i], markersize=10)
# turn on grid
# axs.minorticks_on()
axs.grid(which='major', linestyle=':', linewidth='0.5')
# axs.grid(which='minor', linestyle=':', linewidth='0.5')
# transform plot_labels to percentages
if plot_type != 'p':
vals = axs.get_xticks()
axs.set_xticklabels(['{:3.0f}'.format(x * 100) for x in vals], fontsize=11)
else:
axs.tick_params(axis='x', labelsize=12)
vals = axs.get_yticks()
axs.set_yticklabels(['{:3.0f}'.format(x * 100) for x in vals], fontsize=11)
if rank:
file_name = rank + '_' + file_name
plt.title(rank)
ylabel = ylabel.replace('genome', 'taxon')
plt.xlabel(xlabel, fontsize=13)
plt.ylabel(ylabel, fontsize=13)
plt.tight_layout()
fig.savefig(os.path.join(output_dir, binning_type, file_name + '.eps'), dpi=100, format='eps', bbox_inches='tight')
colors_iter = iter(colors_list)
circles = []
for x in range(len(df_mean)):
circles.append(Line2D([], [], markeredgewidth=0.0, linestyle="None", marker="o", markersize=11, markerfacecolor=next(colors_iter)))
lgd = plt.legend(circles, tools, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0., handlelength=0, frameon=False, fontsize=12)
fig.savefig(os.path.join(output_dir, binning_type, file_name + '.pdf'), dpi=100, format='pdf', bbox_extra_artists=(lgd,), bbox_inches='tight')
fig.savefig(os.path.join(output_dir, binning_type, file_name + '.png'), dpi=200, format='png', bbox_extra_artists=(lgd,), bbox_inches='tight')
plt.close(fig)
def plot_avg_precision_recall(colors, df_results, labels, output_dir, rank=None):
plot_summary(colors,
df_results,
labels,
output_dir,
rank,
'e',
'avg_purity_completeness_bp',
'Average purity per bin (%)',
'Average completeness per genome (%)')
plot_summary(colors,
df_results,
labels,
output_dir,
rank,
'f',
'avg_purity_completeness_seq',
'Average purity per bin (%)',
'Average completeness per genome (%)')
def plot_precision_recall(colors, summary_per_query, labels, output_dir, rank=None):
plot_summary(colors,
summary_per_query,
labels,
output_dir,
rank,
'w',
'purity_recall_bp',
'Purity for sample (%)',
'Completeness for sample (%)')
plot_summary(colors,
summary_per_query,
labels,
output_dir,
rank,
'x',
'purity_completeness_seq',
'Purity for sample (%)',
'Completeness for sample (%)')
def plot_adjusted_rand_index_vs_assigned_bps(colors, summary_per_query, labels, output_dir, rank=None):
plot_summary(colors,
summary_per_query,
labels,
output_dir,
rank,
'p',
'ari_vs_assigned_bps',
'Adjusted Rand index',
'Percentage of binned base pairs')
def plot_taxonomic_results(df_summary_t, metrics_list, errors_list, file_name, output_dir):
colors_list = ["#006cba", "#008000", "#ba9e00", "red"]
for tool, pd_results in df_summary_t.groupby(utils_labels.TOOL):
dict_metric_list = []
for metric in metrics_list:
rank_to_metric = OrderedDict([(k, .0) for k in load_ncbi_taxinfo.RANKS])
dict_metric_list.append(rank_to_metric)
dict_error_list = []
for error in errors_list:
rank_to_metric_error = OrderedDict([(k, .0) for k in load_ncbi_taxinfo.RANKS])
dict_error_list.append(rank_to_metric_error)
for index, row in pd_results.iterrows():
for rank_to_metric, metric in zip(dict_metric_list, metrics_list):
rank_to_metric[row[utils_labels.RANK]] = .0 if np.isnan(row[metric]) else row[metric]
for rank_to_metric_error, error in zip(dict_error_list, errors_list):
rank_to_metric_error[row[utils_labels.RANK]] = .0 if np.isnan(row[error]) else row[error]
fig, axs = plt.subplots(figsize=(6, 5))
# force axes to be from 0 to 100%
axs.set_xlim([0, 7])
axs.set_ylim([0.0, 1.0])
x_values = range(len(load_ncbi_taxinfo.RANKS))
y_values_list = []
for rank_to_metric, color in zip(dict_metric_list, colors_list):
y_values = list(rank_to_metric.values())
axs.plot(x_values, y_values, color=color)
y_values_list.append(y_values)
for rank_to_metric_error, y_values, color in zip(dict_error_list, y_values_list, colors_list):
sem = list(rank_to_metric_error.values())
plt.fill_between(x_values, np.subtract(y_values, sem).tolist(), np.add(y_values, sem).tolist(), color=color, alpha=0.5)
plt.xticks(x_values, load_ncbi_taxinfo.RANKS, rotation='vertical')
vals = axs.get_yticks()
axs.set_yticklabels(['{:3.0f}%'.format(x * 100) for x in vals])
lgd = plt.legend(metrics_list, loc=1, borderaxespad=0., handlelength=2, frameon=False)
plt.tight_layout()
fig.savefig(os.path.join(output_dir, 'taxonomic', tool, file_name + '.png'), dpi=100, format='png', bbox_extra_artists=(lgd,), bbox_inches='tight')
fig.savefig(os.path.join(output_dir, 'taxonomic', tool, file_name + '.pdf'), dpi=100, format='pdf', bbox_extra_artists=(lgd,), bbox_inches='tight')
plt.close(fig)
def create_contamination_column(pd_tool_bins):
pd_tool_bins['newcolumn'] = 1 - pd_tool_bins['precision_bp']
def create_completeness_minus_contamination_column(pd_tool_bins):
pd_tool_bins['newcolumn'] = pd_tool_bins['recall_bp'] + pd_tool_bins['precision_bp'] - 1
def plot_contamination(pd_bins, binning_type, title, xlabel, ylabel, create_column_function, output_dir):
if len(pd_bins) == 0:
return
pd_bins_copy = pd_bins[[utils_labels.TOOL, 'precision_bp', 'recall_bp']].copy().dropna(subset=['precision_bp'])
create_column_function(pd_bins_copy)
colors_list = create_colors_list()
fig, axs = plt.subplots(figsize=(6, 5))
tools = pd_bins_copy[utils_labels.TOOL].unique().tolist()
for color, tool in zip(colors_list, tools):
pd_tool_bins = pd_bins_copy[pd_bins_copy[utils_labels.TOOL] == tool]
pd_tool_bins = pd_tool_bins.sort_values(by='newcolumn', ascending=False).reset_index()
pd_tool_bins = pd_tool_bins.drop(['index'], axis=1)
axs.plot(list(range(1, len(pd_tool_bins) + 1)), pd_tool_bins['newcolumn'], color=color)
min_value = pd_bins_copy['newcolumn'].min()
axs.set_ylim(min_value if min_value < 1.0 else .9, 1.0)
axs.set_xlim(1, None)
axs.grid(which='major', linestyle='-', linewidth='0.5', color='lightgrey')
# transform plot_labels to percentages
vals = axs.get_yticks()
axs.set_yticklabels(['{:3.0f}'.format(y * 100) for y in vals])
plt.xlabel(xlabel, fontsize=14)
plt.ylabel(ylabel + ' [%]', fontsize=14)
lgd = plt.legend(tools, bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0., handlelength=1, frameon=False, fontsize=12)
plt.tight_layout()
file_name = title.lower().replace(' ', '_').replace('-', 'minus').replace('|', '')
fig.savefig(os.path.join(output_dir, binning_type, file_name + '.png'), dpi=100, format='png', bbox_extra_artists=(lgd,), bbox_inches='tight')
fig.savefig(os.path.join(output_dir, binning_type, file_name + '.pdf'), dpi=100, format='pdf', bbox_extra_artists=(lgd,), bbox_inches='tight')
plt.close(fig)
def get_number_of_hq_bins(tools, pd_bins):
pd_counts = pd.DataFrame()
pd_bins_copy = pd_bins[[utils_labels.TOOL, 'precision_bp', 'recall_bp']].copy().dropna(subset=['precision_bp'])
for tool in tools:
pd_tool_bins = pd_bins_copy[pd_bins_copy[utils_labels.TOOL] == tool]
x50 = pd_tool_bins[(pd_tool_bins['recall_bp'] > .5) & (pd_tool_bins['precision_bp'] > .9)].shape[0]
x70 = pd_tool_bins[(pd_tool_bins['recall_bp'] > .7) & (pd_tool_bins['precision_bp'] > .9)].shape[0]
x90 = pd_tool_bins[(pd_tool_bins['recall_bp'] > .9) & (pd_tool_bins['precision_bp'] > .9)].shape[0]
pd_tool_counts = pd.DataFrame([[x90, x70, x50]], columns=['>90%', '>70%', '>50%'], index=[tool])
pd_counts = pd_counts.append(pd_tool_counts)
return pd_counts
def get_number_of_hq_bins_by_score(tools, pd_bins):
pd_counts = pd.DataFrame()
pd_bins_copy = pd_bins[[utils_labels.TOOL, 'precision_bp', 'recall_bp']].copy().dropna(subset=['precision_bp'])
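    # Quality score per bin: completeness minus 5x contamination,
    # where contamination = 1 - purity (precision_bp)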
pd_bins_copy['newcolumn'] = pd_bins_copy['recall_bp'] + 5 * (pd_bins_copy['precision_bp'] - 1)
for tool in tools:
pd_tool_bins = pd_bins_copy[pd_bins_copy[utils_labels.TOOL] == tool]
x50 = pd_tool_bins[pd_tool_bins['newcolumn'] > .5].shape[0]
x70 = pd_tool_bins[pd_tool_bins['newcolumn'] > .7].shape[0]
x90 = pd_tool_bins[pd_tool_bins['newcolumn'] > .9].shape[0]
x50 -= x70
x70 -= x90
pd_tool_counts = pd.DataFrame([[x90, x70, x50]], columns=['>90', '>70', '>50'], index=[tool])
pd_counts = pd_counts.append(pd_tool_counts)
return pd_counts
def plot_counts(pd_bins, tools, output_dir, output_file, get_bin_counts_function):
pd_counts = get_bin_counts_function(tools, pd_bins)
fig, axs = plt.subplots(figsize=(11, 5))
if output_file == 'bin_counts':
fig = pd_counts.plot.bar(ax=axs, stacked=False, color=['#28334AFF', '#FBDE44FF', '#F65058FF'], width=.8, legend=None).get_figure()
else:
fig = pd_counts.plot.bar(ax=axs, stacked=True, color=['#9B4A97FF', '#FC766AFF', '#F9A12EFF'], width=.8, legend=None).get_figure()
axs.tick_params(axis='x', labelrotation=45, length=0)
axs.set_xticklabels(tools, horizontalalignment='right', fontsize=14)
axs.set_xlabel(None)
# axs.yaxis.set_major_locator(MaxNLocator(integer=True))
h, l = axs.get_legend_handles_labels()
axs.set_ylabel('#genome bins', fontsize=14)
# axs.grid(which='major', linestyle=':', linewidth='0.5')
# axs.grid(which='minor', linestyle=':', linewidth='0.5')
ph = [plt.plot([], marker='', ls='')[0]]
handles = ph + h
if output_file == 'bin_counts':
labels = ['Contamination < 10% Completeness '] + l
bbox_to_anchor = (0.49, 1.02)
else:
labels = ['Score '] + l
y_values = (pd_counts['>90'] + pd_counts['>70'] + pd_counts['>50']).tolist()
for i, v in enumerate(y_values):
axs.text(i - .25, v + 5, str(v), color='black', fontweight='bold')
bbox_to_anchor = (0.47, 1.02)
lgd = plt.legend(handles, labels, bbox_to_anchor=bbox_to_anchor, columnspacing=.5, loc=8, borderaxespad=0., handlelength=1, frameon=False, fontsize=14, ncol=5)
# plt.subplots_adjust(hspace=0.6, wspace=0.2)
fig.savefig(os.path.join(output_dir, 'genome', output_file + '.pdf'), dpi=100, format='pdf', bbox_extra_artists=(lgd,), bbox_inches='tight')
fig.savefig(os.path.join(output_dir, 'genome', output_file + '.png'), dpi=200, format='png', bbox_extra_artists=(lgd,), bbox_inches='tight')
plt.close(fig)
| CAMI-challenge/AMBER | src/plots.py | Python | apache-2.0 | 26,475 |
package web.magic.jvm;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.lang.reflect.Proxy;
import java.lang.reflect.UndeclaredThrowableException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import javax.management.MBeanException;
import javax.management.MBeanServer;
import javax.management.ObjectName;
class MBeanTyper {
static final boolean DEBUG = Boolean.getBoolean("jboss.jmx.debug");
/**
 * Create a typed proxy object from an MBean.
*/
public static final Object typeMBean(MBeanServer server, ObjectName mbean, Class<?> mainInterface) throws Exception {
List<Class<?>> interfaces = new ArrayList<Class<?>>();
if (mainInterface.isInterface()) {
interfaces.add(mainInterface);
}
addInterfaces(mainInterface.getInterfaces(), interfaces);
Class<?> cl[] = (Class[]) interfaces.toArray(new Class[interfaces.size()]);
if (DEBUG) {
System.err.println("typeMean->server=" + server + ",mbean=" + mbean + ",mainInterface=" + mainInterface);
for (int c = 0; c < cl.length; c++) {
System.err.println(" :" + cl[c]);
}
}
return Proxy.newProxyInstance(Thread.currentThread().getContextClassLoader(), cl, new MBeanTyperInvoker(server,
mbean));
}
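    // Usage sketch (the MBean interface and ObjectName are hypothetical):
    //   ServiceMBean svc = (ServiceMBean) MBeanTyper.typeMBean(
    //           server, new ObjectName("example:service=Foo"), ServiceMBean.class);
    //   svc.start(); // dispatched through MBeanTyperInvoker to MBeanServer.invoke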
private static final void addInterfaces(Class<?> cl[], List<Class<?>> list) {
if (cl == null)
return;
for (int c = 0; c < cl.length; c++) {
list.add(cl[c]);
addInterfaces(cl[c].getInterfaces(), list);
}
}
}
/**
* MBeanTyperInvoker handles method invocations against the MBeanTyper target
* object and forwards them to the MBeanServer and ObjectName for invocation.
*
* @author <a href="mailto:[email protected]">Jeff Haynie</a>
*/
final class MBeanTyperInvoker implements java.lang.reflect.InvocationHandler {
private final MBeanServer server;
private final ObjectName mbean;
private final Map<Method, String[]> signatureCache = Collections.synchronizedMap(new HashMap<Method, String[]>());
MBeanTyperInvoker(MBeanServer server, ObjectName mbean) {
this.server = server;
this.mbean = mbean;
}
private boolean isJMXAttribute(Method m) {
String name = m.getName();
return (name.startsWith("get"));
}
public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
if (MBeanTyper.DEBUG) {
System.err.println(" ++ method=" + method.getName() + ",args=" + args);
}
try {
if (method.getDeclaringClass() == Object.class) {
String name = method.getName();
if (name.equals("hashCode")) {
return new Integer(this.hashCode());
} else if (name.equals("toString")) {
return this.toString();
} else if (name.equals("equals")) {
// FIXME: this needs to be reviewed - we should be
// smarter about this ...
return new Boolean(equals(args[0]));
}
} else if (isJMXAttribute(method) && (args == null || args.length <= 0)) {
String name = method.getName().substring(3);
return server.getAttribute(mbean, name);
}
String sig[] = (String[]) signatureCache.get(method);
if (sig == null) {
// get the method signature from the method argument directly
// vs. the arguments passed, since there may be primitives that
// are wrapped as objects in the arguments
Class<?> _args[] = method.getParameterTypes();
if (_args != null && _args.length > 0) {
sig = new String[_args.length];
for (int c = 0; c < sig.length; c++) {
if (_args[c] != null) {
sig[c] = _args[c].getName();
}
}
} else {
sig = new String[0];
}
signatureCache.put(method, sig);
}
return server.invoke(mbean, method.getName(), args, sig);
} catch (Throwable t) {
if (MBeanTyper.DEBUG) {
t.printStackTrace();
}
if (t instanceof UndeclaredThrowableException) {
UndeclaredThrowableException ut = (UndeclaredThrowableException) t;
throw ut.getUndeclaredThrowable();
} else if (t instanceof InvocationTargetException) {
InvocationTargetException it = (InvocationTargetException) t;
throw it.getTargetException();
} else if (t instanceof MBeanException) {
MBeanException me = (MBeanException) t;
throw me.getTargetException();
} else {
throw t;
}
}
}
} | liufeiit/WebMagic | WebMagic/src/test/java/web/magic/jvm/MBeanTyper.java | Java | apache-2.0 | 4,284 |
# grid Cheatsheet
> `display: grid` layout
Source: http://grid.malven.co
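
A minimal sketch of the layout pattern this cheatsheet covers (selector and values are illustrative):

```css
.container {
  display: grid;                          /* enable grid layout */
  grid-template-columns: repeat(3, 1fr);  /* three equal-width columns */
  gap: 10px;                              /* spacing between grid cells */
}
```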
| mengqing723/mengqing723.github.io | dist/demo/css/grid/README.md | Markdown | apache-2.0 | 76 |
// ----------------------------------------------------------------------------
// Module initialization
var Config = require("config").config;
var utils = require("utils");
var validators = require("validators");
// ----------------------------------------------------------------------------
// Setting class.
function Setting() {
$.title_label.text_id = this.args.title_id;
$.title_label.text = Alloy.Globals.L(this.args.title_id);
// This will trigger UI update. Ugly solution I know.
$.setting.top = this.args.top || 0;
if (typeof this.args.width !== 'undefined') {
$.setting.width = this.args.width;
}
// Listen to the "SettingChanges" event. It simply updates the string
// representation of the property that the view shows.
this.addSettingsChangedHandler(this.updateValue);
}
// Inherits from Controller...
Setting.prototype = new (require("controller"))(
arguments[0], [$.title_label]
);
// Read the actual value of the property that this setting is responsible for
Setting.prototype.updateValue = function() {
$.setting_value.text =
Alloy.Globals.L(Config.getProperty(this.args.propertyName).stringValue());
};
Setting.prototype.handleClick = function (initial, use, validator) {
var self = this;
var arg = {
useValue: function(value) {
if (eval("validators." + validator + "(value)")) {
use(self.args.propertyName, value);
self.updateValue();
} else {
alert(Alloy.Globals.L("illegal_value"));
}
},
value: initial,
validator: validator
};
utils.openWindowWithBottomClicksDisabled(this.args.controllerName, arg);
};
Setting.prototype.clickHandler = function() {
var initial = Config.getProperty(this.args.propertyName).get();
var validator = typeof this.args.validator !== 'undefined' ?
this.args.validator : "ok";
function use(n, v) {
Config.getProperty(n).set(v);
}
this.handleClick(initial, use, validator);
};
// ----------------------------------------------------------------------------
// Create the object representing this particular setting
var setting = new Setting();
// Handling button click event
function onClick(e) {
setting.clickHandler();
}
| garlictech/APIXMobil | app/controllers/setting.js | JavaScript | apache-2.0 | 2,314 |
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1">
<!-- The above 3 meta tags *must* come first in the head; any other head content must come *after* these tags -->
<title><?php print $title; ?></title>
<!-- Bootstrap -->
<link href="css/bootstrap.min.css" rel="stylesheet">
<!-- HTML5 shim and Respond.js for IE8 support of HTML5 elements and media queries -->
<!-- WARNING: Respond.js doesn't work if you view the page via file:// -->
<!--[if lt IE 9]>
<script src="https://oss.maxcdn.com/html5shiv/3.7.3/html5shiv.min.js"></script>
<script src="https://oss.maxcdn.com/respond/1.4.2/respond.min.js"></script>
<![endif]-->
</head>
| ak229/personalWebsite | old/pages/header.php | PHP | apache-2.0 | 830 |
<?php
namespace Tecnoready\Common\Service\ObjectManager;
/**
 * Trait providing configuration fields
*
* @author Carlos Mendoza <[email protected]>
*/
trait TraitConfigure
{
/**
 * Type of object that generates the history (invoices, quotes, contracts)
* @var string
*/
private $objectType;
/**
 * Unique identifier of the object that owns the files (14114, DF-23454)
* @var string
*/
private $objectId;
public function configure($objectId, $objectType,array $options = [])
{
$this->objectId = $objectId;
$this->objectType = $objectType;
}
}
| Tecnoready/Common | Service/ObjectManager/TraitConfigure.php | PHP | apache-2.0 | 628 |
Map_347E30: dc.w Frame_347E4A-Map_347E30
dc.w Frame_347E4C-Map_347E30
dc.w Frame_347E66-Map_347E30
dc.w Frame_347E80-Map_347E30
dc.w Frame_347E9A-Map_347E30
dc.w Frame_347EB4-Map_347E30
dc.w Frame_347ECE-Map_347E30
dc.w Frame_347EE2-Map_347E30
dc.w Frame_347EFC-Map_347E30
dc.w Frame_347F16-Map_347E30
dc.w Frame_347F30-Map_347E30
dc.w Frame_347F4A-Map_347E30
dc.w Frame_347F6A-Map_347E30
Frame_347E4A: dc.w 0
Frame_347E4C: dc.w 4
dc.b $FC, $F, 0, 0,$FF,$FA
dc.b $EC, 7, 0,$10,$FF,$EA
dc.b $EC, 9, 0,$18,$FF,$FA
dc.b $DC, 9, 0,$1E,$FF,$F2
Frame_347E66: dc.w 4
dc.b $EE, 8, 0, 0,$FF,$F0
dc.b $F6, $D, 0, 3,$FF,$F0
dc.b 6, 8, 0, $B,$FF,$F8
dc.b $E, 6, 0, $E,$FF,$F8
Frame_347E80: dc.w 4
dc.b $E9, $A, 0, 0,$FF,$F1
dc.b $F9, 4, 0, 9, 0, 9
dc.b 1, $D, 0, $B,$FF,$F1
dc.b $11, 9, 0,$13,$FF,$E9
Frame_347E9A: dc.w 4
dc.b $EA, $F, 0, 0,$FF,$F3
dc.b $A, 8, 0,$10,$FF,$F3
dc.b $12, $C, 0,$13,$FF,$EB
dc.b $1A, 8, 0,$17,$FF,$EB
Frame_347EB4: dc.w 4
dc.b $EA, 8, 0, 0,$FF,$F5
dc.b $F2, $E, 0, 3,$FF,$ED
dc.b $A, 8, 0, $F,$FF,$F5
dc.b $12, $D, 0,$12,$FF,$F5
Frame_347ECE: dc.w 3
dc.b $EF, $F, 0, 0,$FF,$EC
dc.b $F, $C, 0,$10,$FF,$E4
dc.b $F, 8, 0,$14, 0, 4
Frame_347EE2: dc.w 4
dc.b $EF, $F, 0, 0,$FF,$EC
dc.b $F, $C, 0,$10,$FF,$E4
dc.b $F, 8, 0,$14, 0, 4
dc.b 7, 0, 0,$17, 0,$14
Frame_347EFC: dc.w 4
dc.b $EF, $F, 0, 0,$FF,$EC
dc.b 7, 4, 0,$10, 0, $C
dc.b $F, $C, 0,$12,$FF,$E4
dc.b $F, 0, 0,$16, 0, 4
Frame_347F16: dc.w 4
dc.b $F1, $E, 0, 0,$FF,$E5
dc.b $F1, 6, 0, $C, 0, 5
dc.b 9, $C, 0,$12,$FF,$ED
dc.b $11, $A, 0,$16,$FF,$ED
Frame_347F30: dc.w 4
dc.b $EB, $F, 0, 0,$FF,$F6
dc.b $F3, $A, 0,$10,$FF,$DE
dc.b $B, $C, 0,$19,$FF,$EE
dc.b $13, 9, 0,$1D,$FF,$F6
Frame_347F4A: dc.w 5
dc.b $EE, $F, 0, 0,$FF,$EC
dc.b $FE, 0, 0,$10, 0, $C
dc.b $E, $C, 0,$11,$FF,$E4
dc.b $E, 0, 0,$15, 0, 4
dc.b $16, $C, 0,$16,$FF,$FC
Frame_347F6A: dc.w 5
dc.b $EA, 8, 0, 0,$FF,$EE
dc.b $F2, $E, 0, 3,$FF,$EE
dc.b $A, $C, 0, $F,$FF,$E6
dc.b $12, $C, 0,$13,$FF,$EE
dc.b $1A, $C, 0,$17,$FF,$FE
| TeamASM-Blur/Sonic-3-Blue-Balls-Edition | Working Disassembly/General/Sprites/Sonic/Map - Sonic Snowboarding.asm | Assembly | apache-2.0 | 2,205 |
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
<meta http-equiv="X-UA-Compatible" content="IE=9"/>
<meta name="generator" content="Doxygen 1.8.3.1"/>
<title>Ilwis-Objects: util/box.h Source File</title>
<link href="tabs.css" rel="stylesheet" type="text/css"/>
<script type="text/javascript" src="jquery.js"></script>
<script type="text/javascript" src="dynsections.js"></script>
<link href="search/search.css" rel="stylesheet" type="text/css"/>
<script type="text/javascript" src="search/search.js"></script>
<script type="text/javascript">
$(document).ready(function() { searchBox.OnSelectItem(0); });
</script>
<link href="doxygen.css" rel="stylesheet" type="text/css" />
</head>
<body>
<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
<div id="titlearea">
<table cellspacing="0" cellpadding="0">
<tbody>
<tr style="height: 56px;">
<td id="projectlogo"><img alt="Logo" src="ilwisobjectsgeneral.PNG"/></td>
<td style="padding-left: 0.5em;">
<div id="projectname">Ilwis-Objects
 <span id="projectnumber">1.0</span>
</div>
<div id="projectbrief">GIS and Remote Sensing framework for data access and processing</div>
</td>
</tr>
</tbody>
</table>
</div>
<!-- end header part -->
<!-- Generated by Doxygen 1.8.3.1 -->
<script type="text/javascript">
var searchBox = new SearchBox("searchBox", "search",false,'Search');
</script>
<div id="navrow1" class="tabs">
<ul class="tablist">
<li><a href="index.html"><span>Main Page</span></a></li>
<li><a href="annotated.html"><span>Classes</span></a></li>
<li class="current"><a href="files.html"><span>Files</span></a></li>
<li>
<div id="MSearchBox" class="MSearchBoxInactive">
<span class="left">
<img id="MSearchSelect" src="search/mag_sel.png"
onmouseover="return searchBox.OnSearchSelectShow()"
onmouseout="return searchBox.OnSearchSelectHide()"
alt=""/>
<input type="text" id="MSearchField" value="Search" accesskey="S"
onfocus="searchBox.OnSearchFieldFocus(true)"
onblur="searchBox.OnSearchFieldFocus(false)"
onkeyup="searchBox.OnSearchFieldChange(event)"/>
</span><span class="right">
<a id="MSearchClose" href="javascript:searchBox.CloseResultsWindow()"><img id="MSearchCloseImg" border="0" src="search/close.png" alt=""/></a>
</span>
</div>
</li>
</ul>
</div>
<div id="navrow2" class="tabs2">
<ul class="tablist">
<li><a href="files.html"><span>File List</span></a></li>
</ul>
</div>
<!-- window showing the filter options -->
<div id="MSearchSelectWindow"
onmouseover="return searchBox.OnSearchSelectShow()"
onmouseout="return searchBox.OnSearchSelectHide()"
onkeydown="return searchBox.OnSearchSelectKey(event)">
<a class="SelectItem" href="javascript:void(0)" onclick="searchBox.OnSelectItem(0)"><span class="SelectionMark"> </span>All</a><a class="SelectItem" href="javascript:void(0)" onclick="searchBox.OnSelectItem(1)"><span class="SelectionMark"> </span>Classes</a><a class="SelectItem" href="javascript:void(0)" onclick="searchBox.OnSelectItem(2)"><span class="SelectionMark"> </span>Functions</a><a class="SelectItem" href="javascript:void(0)" onclick="searchBox.OnSelectItem(3)"><span class="SelectionMark"> </span>Enumerations</a><a class="SelectItem" href="javascript:void(0)" onclick="searchBox.OnSelectItem(4)"><span class="SelectionMark"> </span>Pages</a></div>
<!-- iframe showing the search results (closed by default) -->
<div id="MSearchResultsWindow">
<iframe src="javascript:void(0)" frameborder="0"
name="MSearchResults" id="MSearchResults">
</iframe>
</div>
<div id="nav-path" class="navpath">
<ul>
<li class="navelem"><a class="el" href="dir_23ec12649285f9fabf3a6b7380226c28.html">util</a></li> </ul>
</div>
</div><!-- top -->
<div class="header">
<div class="headertitle">
<div class="title">box.h</div> </div>
</div><!--header-->
<div class="contents">
<div class="fragment"><div class="line"><a name="l00001"></a><span class="lineno"> 1</span> <span class="preprocessor">#ifndef BOX_H</span></div>
<div class="line"><a name="l00002"></a><span class="lineno"> 2</span> <span class="preprocessor"></span><span class="preprocessor">#define BOX_H</span></div>
<div class="line"><a name="l00003"></a><span class="lineno"> 3</span> <span class="preprocessor"></span></div>
<div class="line"><a name="l00004"></a><span class="lineno"> 4</span> <span class="preprocessor">#include <QSize></span></div>
<div class="line"><a name="l00005"></a><span class="lineno"> 5</span> <span class="preprocessor">#include "size.h"</span></div>
<div class="line"><a name="l00006"></a><span class="lineno"> 6</span> <span class="preprocessor">#include "errmessages.h"</span></div>
<div class="line"><a name="l00007"></a><span class="lineno"> 7</span> <span class="preprocessor">#include "range.h"</span></div>
<div class="line"><a name="l00008"></a><span class="lineno"> 8</span> </div>
<div class="line"><a name="l00009"></a><span class="lineno"> 9</span> <span class="keyword">namespace </span>Ilwis {</div>
<div class="line"><a name="l00014"></a><span class="lineno"> 14</span> <span class="keyword">template</span><<span class="keyword">class</span> Po<span class="keywordtype">int</span>Type=Coordinate> <span class="keyword">class </span>Box : <span class="keyword">public</span> Range{</div>
<div class="line"><a name="l00015"></a><span class="lineno"> 15</span> <span class="keyword">public</span>:</div>
<div class="line"><a name="l00016"></a><span class="lineno"> 16</span>  <span class="keyword">enum</span> Dimension{dim0=0, dimX=1, dimY=2, dimZ=4};</div>
<div class="line"><a name="l00017"></a><span class="lineno"> 17</span> </div>
<div class="line"><a name="l00018"></a><span class="lineno"> 18</span>  Box() : _min_corner(PointType(0,0,0)), _max_corner(PointType(0,0,0)){</div>
<div class="line"><a name="l00019"></a><span class="lineno"> 19</span>  }</div>
<div class="line"><a name="l00020"></a><span class="lineno"> 20</span> </div>
<div class="line"><a name="l00021"></a><span class="lineno"> 21</span>  Box(<span class="keyword">const</span> PointType& pMin, <span class="keyword">const</span> PointType& pMax) : _min_corner(pMin), _max_corner(pMax){</div>
<div class="line"><a name="l00022"></a><span class="lineno"> 22</span>  normalize();</div>
<div class="line"><a name="l00023"></a><span class="lineno"> 23</span>  }</div>
<div class="line"><a name="l00024"></a><span class="lineno"> 24</span> </div>
<div class="line"><a name="l00025"></a><span class="lineno"> 25</span>  Box(<span class="keyword">const</span> Box<PointType>& bx) : _min_corner(bx.min_corner()), _max_corner(bx.max_corner()) {</div>
<div class="line"><a name="l00026"></a><span class="lineno"> 26</span> </div>
<div class="line"><a name="l00027"></a><span class="lineno"> 27</span>  }</div>
<div class="line"><a name="l00028"></a><span class="lineno"> 28</span> </div>
<div class="line"><a name="l00029"></a><span class="lineno"> 29</span>  Box(Box<PointType>&& box) :</div>
<div class="line"><a name="l00030"></a><span class="lineno"> 30</span>  _min_corner(std::move(box._min_corner)),</div>
<div class="line"><a name="l00031"></a><span class="lineno"> 31</span>  _max_corner(std::move(box._max_corner))</div>
<div class="line"><a name="l00032"></a><span class="lineno"> 32</span>  {</div>
<div class="line"><a name="l00033"></a><span class="lineno"> 33</span>  box._min_corner = box._max_corner = PointType();</div>
<div class="line"><a name="l00034"></a><span class="lineno"> 34</span>  }</div>
<div class="line"><a name="l00035"></a><span class="lineno"> 35</span> </div>
<div class="line"><a name="l00036"></a><span class="lineno"> 36</span>  Box(<span class="keyword">const</span> QSize& sz) : _min_corner(PointType(0,0,0)),_max_corner(PointType(sz.width()-1, sz.height()-1),0){</div>
<div class="line"><a name="l00037"></a><span class="lineno"> 37</span>  }</div>
<div class="line"><a name="l00038"></a><span class="lineno"> 38</span> </div>
<div class="line"><a name="l00039"></a><span class="lineno"> 39</span>  <span class="keyword">template</span><<span class="keyword">typename</span> T> Box(<span class="keyword">const</span> Size<T>& sz) : _min_corner(PointType(0,0,0)),_max_corner(PointType(sz.xsize()-1, sz.ysize()-1,sz.zsize()-1)){</div>
<div class="line"><a name="l00040"></a><span class="lineno"> 40</span>  }</div>
<div class="line"><a name="l00041"></a><span class="lineno"> 41</span> </div>
<div class="line"><a name="l00046"></a><span class="lineno"><a class="code" href="class_ilwis_1_1_box.html#adec0fd6f9fb44de378ca4766f70dd779"> 46</a></span>  <a class="code" href="class_ilwis_1_1_box.html#adec0fd6f9fb44de378ca4766f70dd779">Box</a>(<span class="keyword">const</span> QString& envelope) : _min_corner(PointType(0,0)), _max_corner(PointType(0,0)){</div>
<div class="line"><a name="l00047"></a><span class="lineno"> 47</span>  <span class="keywordtype">int</span> index1 = envelope.indexOf(<span class="stringliteral">"("</span>);</div>
<div class="line"><a name="l00048"></a><span class="lineno"> 48</span>  <span class="keywordflow">if</span> ( index1 != -1) {</div>
<div class="line"><a name="l00049"></a><span class="lineno"> 49</span>  <span class="keywordtype">int</span> index2 = envelope.indexOf(<span class="stringliteral">")"</span>) ;</div>
<div class="line"><a name="l00050"></a><span class="lineno"> 50</span>  <span class="keywordflow">if</span> ( index2 == -1){</div>
<div class="line"><a name="l00051"></a><span class="lineno"> 51</span>  <span class="keywordflow">return</span>;</div>
<div class="line"><a name="l00052"></a><span class="lineno"> 52</span>  }</div>
<div class="line"><a name="l00053"></a><span class="lineno"> 53</span> </div>
<div class="line"><a name="l00054"></a><span class="lineno"> 54</span>  QString coords = envelope.mid(index1+1, index2 - index1 - 1);</div>
<div class="line"><a name="l00055"></a><span class="lineno"> 55</span>  coords = coords.trimmed();</div>
<div class="line"><a name="l00056"></a><span class="lineno"> 56</span>  QStringList parts = coords.split(<span class="stringliteral">","</span>);</div>
<div class="line"><a name="l00057"></a><span class="lineno"> 57</span>  <span class="keywordflow">if</span> ( parts.size() != 2){</div>
<div class="line"><a name="l00058"></a><span class="lineno"> 58</span>  <span class="keywordflow">return</span>;</div>
<div class="line"><a name="l00059"></a><span class="lineno"> 59</span>  }</div>
<div class="line"><a name="l00060"></a><span class="lineno"> 60</span>  QStringList p1 = parts[0].trimmed().split(<span class="charliteral">' '</span>);</div>
<div class="line"><a name="l00061"></a><span class="lineno"> 61</span>  <span class="keywordflow">if</span> ( p1.size() < 2)</div>
<div class="line"><a name="l00062"></a><span class="lineno"> 62</span>  <span class="keywordflow">return</span>;</div>
<div class="line"><a name="l00063"></a><span class="lineno"> 63</span>  this->min_corner().x = p1[0].trimmed().toDouble();</div>
<div class="line"><a name="l00064"></a><span class="lineno"> 64</span>  this->min_corner().y = p1[1].trimmed().toDouble();</div>
<div class="line"><a name="l00065"></a><span class="lineno"> 65</span>  <span class="keywordflow">if</span> ( p1.size() == 3)</div>
<div class="line"><a name="l00066"></a><span class="lineno"> 66</span>  this->min_corner().z = p1[2].trimmed().toDouble();</div>
<div class="line"><a name="l00067"></a><span class="lineno"> 67</span> </div>
<div class="line"><a name="l00068"></a><span class="lineno"> 68</span>  QStringList p2 = parts[1].trimmed().split(<span class="charliteral">' '</span>);</div>
<div class="line"><a name="l00069"></a><span class="lineno"> 69</span>  <span class="keywordflow">if</span> ( p1.size() < 2) {</div>
<div class="line"><a name="l00070"></a><span class="lineno"> 70</span>  this->min_corner().x = 0;</div>
<div class="line"><a name="l00071"></a><span class="lineno"> 71</span>  this->min_corner().y = 0;</div>
<div class="line"><a name="l00072"></a><span class="lineno"> 72</span>  this->min_corner().z = 0;</div>
<div class="line"><a name="l00073"></a><span class="lineno"> 73</span>  <span class="keywordflow">return</span>;</div>
<div class="line"><a name="l00074"></a><span class="lineno"> 74</span>  }</div>
<div class="line"><a name="l00075"></a><span class="lineno"> 75</span>  this->max_corner().x = p2[0].trimmed().toDouble();</div>
<div class="line"><a name="l00076"></a><span class="lineno"> 76</span>  this->max_corner().y = p2[1].trimmed().toDouble();</div>
<div class="line"><a name="l00077"></a><span class="lineno"> 77</span>  <span class="keywordflow">if</span> ( p2.size() == 3)</div>
<div class="line"><a name="l00078"></a><span class="lineno"> 78</span>  this->max_corner().z = p2[2].trimmed().toDouble();</div>
<div class="line"><a name="l00079"></a><span class="lineno"> 79</span>  }</div>
<div class="line"><a name="l00080"></a><span class="lineno"> 80</span>  }</div>
<div class="line"><a name="l00081"></a><span class="lineno"> 81</span> </div>
<div class="line"><a name="l00082"></a><span class="lineno"><a class="code" href="class_ilwis_1_1_box.html#ab0511c11ae04999d283f67c6ea27cee4"> 82</a></span>  IlwisTypes <a class="code" href="class_ilwis_1_1_box.html#ab0511c11ae04999d283f67c6ea27cee4" title="valueType returns the type of values contained in the range">valueType</a>()<span class="keyword"> const</span>{</div>
<div class="line"><a name="l00083"></a><span class="lineno"> 83</span>  <span class="keywordflow">return</span> max_corner().valuetype();</div>
<div class="line"><a name="l00084"></a><span class="lineno"> 84</span>  }</div>
<div class="line"><a name="l00085"></a><span class="lineno"> 85</span> </div>
<div class="line"><a name="l00086"></a><span class="lineno"><a class="code" href="class_ilwis_1_1_box.html#ab8771e4e5dda06e115eba2750ec9c255"> 86</a></span>  <a class="code" href="class_ilwis_1_1_range.html" title="The Range class base interface for all objects that need to define a range of values.">Range</a> *<a class="code" href="class_ilwis_1_1_box.html#ab8771e4e5dda06e115eba2750ec9c255">clone</a>()<span class="keyword"> const</span>{</div>
<div class="line"><a name="l00087"></a><span class="lineno"> 87</span>  <span class="keywordflow">return</span> <span class="keyword">new</span> <a class="code" href="class_ilwis_1_1_box.html">Box<PointType></a>(*this);</div>
<div class="line"><a name="l00088"></a><span class="lineno"> 88</span>  }</div>
<div class="line"><a name="l00089"></a><span class="lineno"> 89</span> </div>
<div class="line"><a name="l00090"></a><span class="lineno"> 90</span> </div>
<div class="line"><a name="l00091"></a><span class="lineno"> 91</span>  PointType min_corner()<span class="keyword"> const </span>{</div>
<div class="line"><a name="l00092"></a><span class="lineno"> 92</span>  <span class="keywordflow">return</span> _min_corner;</div>
<div class="line"><a name="l00093"></a><span class="lineno"> 93</span>  }</div>
<div class="line"><a name="l00094"></a><span class="lineno"> 94</span> </div>
<div class="line"><a name="l00095"></a><span class="lineno"> 95</span>  PointType max_corner()<span class="keyword"> const </span>{</div>
<div class="line"><a name="l00096"></a><span class="lineno"> 96</span>  <span class="keywordflow">return</span> _max_corner;</div>
<div class="line"><a name="l00097"></a><span class="lineno"> 97</span>  }</div>
<div class="line"><a name="l00098"></a><span class="lineno"> 98</span> </div>
<div class="line"><a name="l00099"></a><span class="lineno"> 99</span>  PointType& min_corner() {</div>
<div class="line"><a name="l00100"></a><span class="lineno"> 100</span>  <span class="keywordflow">return</span> _min_corner;</div>
<div class="line"><a name="l00101"></a><span class="lineno"> 101</span>  }</div>
<div class="line"><a name="l00102"></a><span class="lineno"> 102</span> </div>
<div class="line"><a name="l00103"></a><span class="lineno"> 103</span>  PointType& max_corner() {</div>
<div class="line"><a name="l00104"></a><span class="lineno"> 104</span>  <span class="keywordflow">return</span> _max_corner;</div>
<div class="line"><a name="l00105"></a><span class="lineno"> 105</span>  }</div>
<div class="line"><a name="l00106"></a><span class="lineno"> 106</span> </div>
<div class="line"><a name="l00107"></a><span class="lineno"> 107</span>  <span class="keywordtype">double</span> xlength()<span class="keyword"> const </span>{</div>
<div class="line"><a name="l00108"></a><span class="lineno"> 108</span>  <span class="keywordflow">return</span> std::abs(this->min_corner().x - this->max_corner().x) + 1;</div>
<div class="line"><a name="l00109"></a><span class="lineno"> 109</span>  }</div>
<div class="line"><a name="l00110"></a><span class="lineno"> 110</span> </div>
<div class="line"><a name="l00111"></a><span class="lineno"> 111</span>  <span class="keywordtype">double</span> ylength()<span class="keyword"> const </span>{</div>
<div class="line"><a name="l00112"></a><span class="lineno"> 112</span>  <span class="keywordflow">return</span> std::abs(this->min_corner().y - this->max_corner().y) + 1;</div>
<div class="line"><a name="l00113"></a><span class="lineno"> 113</span>  }</div>
<div class="line"><a name="l00114"></a><span class="lineno"> 114</span> </div>
<div class="line"><a name="l00115"></a><span class="lineno"> 115</span>  <span class="keywordtype">double</span> zlength()<span class="keyword"> const </span>{</div>
<div class="line"><a name="l00116"></a><span class="lineno"> 116</span>  <span class="keywordflow">return</span> std::abs(this->min_corner().z - this->max_corner().z) + 1;</div>
<div class="line"><a name="l00117"></a><span class="lineno"> 117</span>  }</div>
<div class="line"><a name="l00118"></a><span class="lineno"> 118</span> </div>
<div class="line"><a name="l00119"></a><span class="lineno"> 119</span>  <span class="keyword">template</span><<span class="keyword">typename</span> T=qu<span class="keywordtype">int</span>32> Size<T> size()<span class="keyword"> const </span>{</div>
<div class="line"><a name="l00120"></a><span class="lineno"> 120</span>  <span class="keywordflow">return</span> Size<T>(xlength(), ylength(), zlength());</div>
<div class="line"><a name="l00121"></a><span class="lineno"> 121</span>  }</div>
<div class="line"><a name="l00122"></a><span class="lineno"> 122</span> </div>
<div class="line"><a name="l00123"></a><span class="lineno"> 123</span>  <span class="keywordtype">bool</span> is3D()<span class="keyword"> const </span>{</div>
<div class="line"><a name="l00124"></a><span class="lineno"> 124</span>  <span class="keywordflow">return</span> this->min_corner().is3D() && this->max_corner().is3D();</div>
<div class="line"><a name="l00125"></a><span class="lineno"> 125</span>  }</div>
<div class="line"><a name="l00126"></a><span class="lineno"> 126</span>  quint64 area()<span class="keyword"> const </span>{</div>
<div class="line"><a name="l00127"></a><span class="lineno"> 127</span>  <span class="keywordflow">if</span> ( !<a class="code" href="class_ilwis_1_1_box.html#a3da68f18ac3a077d737db00348fca990">isValid</a>())</div>
<div class="line"><a name="l00128"></a><span class="lineno"> 128</span>  <span class="keywordflow">return</span> 0;</div>
<div class="line"><a name="l00129"></a><span class="lineno"> 129</span>  <span class="keywordflow">return</span> xlength() * ylength();</div>
<div class="line"><a name="l00130"></a><span class="lineno"> 130</span>  }</div>
<div class="line"><a name="l00131"></a><span class="lineno"> 131</span> </div>
<div class="line"><a name="l00132"></a><span class="lineno"> 132</span>  quint64 volume()<span class="keyword"> const </span>{</div>
<div class="line"><a name="l00133"></a><span class="lineno"> 133</span>  <span class="keywordflow">if</span> (!is3D())</div>
<div class="line"><a name="l00134"></a><span class="lineno"> 134</span>  <span class="keywordflow">return</span> area();</div>
<div class="line"><a name="l00135"></a><span class="lineno"> 135</span>  <span class="keywordflow">return</span> xlength() * ylength() * zlength();</div>
<div class="line"><a name="l00136"></a><span class="lineno"> 136</span>  }</div>
<div class="line"><a name="l00137"></a><span class="lineno"> 137</span> </div>
<div class="line"><a name="l00138"></a><span class="lineno"> 138</span>  <span class="keywordtype">bool</span> contains(<span class="keyword">const</span> PointType& p)<span class="keyword"> const </span>{</div>
<div class="line"><a name="l00139"></a><span class="lineno"> 139</span>  <span class="keywordflow">if</span> (!p.isValid())</div>
<div class="line"><a name="l00140"></a><span class="lineno"> 140</span>  <span class="keywordflow">return</span> <span class="keyword">false</span>;</div>
<div class="line"><a name="l00141"></a><span class="lineno"> 141</span>  <span class="keywordflow">if</span>(!<a class="code" href="class_ilwis_1_1_box.html#a3da68f18ac3a077d737db00348fca990">isValid</a>())</div>
<div class="line"><a name="l00142"></a><span class="lineno"> 142</span>  <span class="keywordflow">return</span> <span class="keyword">false</span>;</div>
<div class="line"><a name="l00143"></a><span class="lineno"> 143</span> </div>
<div class="line"><a name="l00144"></a><span class="lineno"> 144</span>  <span class="keyword">const</span> PointType& pmin = this->min_corner();</div>
<div class="line"><a name="l00145"></a><span class="lineno"> 145</span>  <span class="keyword">const</span> PointType& pmax = this->max_corner();</div>
<div class="line"><a name="l00146"></a><span class="lineno"> 146</span>  <span class="keywordtype">bool</span> ok = p.x >= pmin.x && p.x <= pmax.x &&</div>
<div class="line"><a name="l00147"></a><span class="lineno"> 147</span>  p.y >= pmin.y && p.y <= pmax.y;</div>
<div class="line"><a name="l00148"></a><span class="lineno"> 148</span>  <span class="keywordflow">if</span> ( is3D() && p.is3D()) {</div>
<div class="line"><a name="l00149"></a><span class="lineno"> 149</span>  ok = p.z >= pmin.z && p.z <= pmax.z;</div>
<div class="line"><a name="l00150"></a><span class="lineno"> 150</span>  }</div>
<div class="line"><a name="l00151"></a><span class="lineno"> 151</span>  <span class="keywordflow">return</span> ok;</div>
<div class="line"><a name="l00152"></a><span class="lineno"> 152</span>  }</div>
<div class="line"><a name="l00153"></a><span class="lineno"> 153</span> </div>
<div class="line"><a name="l00154"></a><span class="lineno"> 154</span>  <span class="keywordtype">bool</span> contains(Box<PointType>& box)<span class="keyword"> const</span>{</div>
<div class="line"><a name="l00155"></a><span class="lineno"> 155</span>  <span class="keywordflow">return</span> contains(box.min_corner()) && contains(box.max_corner());</div>
<div class="line"><a name="l00156"></a><span class="lineno"> 156</span>  }</div>
<div class="line"><a name="l00157"></a><span class="lineno"> 157</span> </div>
<div class="line"><a name="l00158"></a><span class="lineno"> 158</span>  <span class="keywordtype">bool</span> contains(<span class="keyword">const</span> QVariant& value, <span class="keywordtype">bool</span> inclusive = <span class="keyword">true</span>)<span class="keyword"> const </span>{</div>
<div class="line"><a name="l00159"></a><span class="lineno"> 159</span>  <span class="comment">//TODO:</span></div>
<div class="line"><a name="l00160"></a><span class="lineno"> 160</span>  <span class="keywordflow">return</span> <span class="keyword">false</span>;</div>
<div class="line"><a name="l00161"></a><span class="lineno"> 161</span>  }</div>
<div class="line"><a name="l00162"></a><span class="lineno"> 162</span> </div>
<div class="line"><a name="l00163"></a><span class="lineno"> 163</span>  <span class="keywordtype">bool</span> equals(Box<PointType>& box, <span class="keywordtype">double</span> delta=0)<span class="keyword"> const </span>{</div>
<div class="line"><a name="l00164"></a><span class="lineno"> 164</span>  <span class="keywordflow">if</span> ( !box.isValid())</div>
<div class="line"><a name="l00165"></a><span class="lineno"> 165</span>  <span class="keywordflow">return</span> <span class="keyword">false</span>;</div>
<div class="line"><a name="l00166"></a><span class="lineno"> 166</span>  <span class="keywordflow">if</span> (!<a class="code" href="class_ilwis_1_1_box.html#a3da68f18ac3a077d737db00348fca990">isValid</a>())</div>
<div class="line"><a name="l00167"></a><span class="lineno"> 167</span>  <span class="keywordflow">return</span> <span class="keyword">false</span>;</div>
<div class="line"><a name="l00168"></a><span class="lineno"> 168</span> </div>
<div class="line"><a name="l00169"></a><span class="lineno"> 169</span>  <span class="keyword">const</span> PointType& pmin = box.min_corner();</div>
<div class="line"><a name="l00170"></a><span class="lineno"> 170</span>  <span class="keyword">const</span> PointType& pmax = box.max_corner();</div>
<div class="line"><a name="l00171"></a><span class="lineno"> 171</span> </div>
<div class="line"><a name="l00172"></a><span class="lineno"> 172</span>  <span class="keywordflow">if</span> ( std::abs( min_corner.x - pmin.x) > delta)</div>
<div class="line"><a name="l00173"></a><span class="lineno"> 173</span>  <span class="keywordflow">return</span> <span class="keyword">false</span>;</div>
<div class="line"><a name="l00174"></a><span class="lineno"> 174</span>  <span class="keywordflow">if</span> ( std::abs( min_corner.y - pmin.y) > delta)</div>
<div class="line"><a name="l00175"></a><span class="lineno"> 175</span>  <span class="keywordflow">return</span> <span class="keyword">false</span>;</div>
<div class="line"><a name="l00176"></a><span class="lineno"> 176</span>  <span class="keywordflow">if</span> ( std::abs( max_corner.x - pmax.x) > delta)</div>
<div class="line"><a name="l00177"></a><span class="lineno"> 177</span>  <span class="keywordflow">return</span> <span class="keyword">false</span>;</div>
<div class="line"><a name="l00178"></a><span class="lineno"> 178</span>  <span class="keywordflow">if</span> ( std::abs( max_corner.y - pmax.y) > delta)</div>
<div class="line"><a name="l00179"></a><span class="lineno"> 179</span>  <span class="keywordflow">return</span> <span class="keyword">false</span>;</div>
<div class="line"><a name="l00180"></a><span class="lineno"> 180</span>  <span class="keywordflow">if</span> ( is3D() && box.is3D()) {</div>
<div class="line"><a name="l00181"></a><span class="lineno"> 181</span>  <span class="keywordflow">if</span> ( std::abs( min_corner.z - pmin.z) > delta)</div>
<div class="line"><a name="l00182"></a><span class="lineno"> 182</span>  <span class="keywordflow">return</span> <span class="keyword">false</span>;</div>
<div class="line"><a name="l00183"></a><span class="lineno"> 183</span>  <span class="keywordflow">if</span> ( std::abs( max_corner.z - pmax.z) > delta)</div>
<div class="line"><a name="l00184"></a><span class="lineno"> 184</span>  <span class="keywordflow">return</span> <span class="keyword">false</span>;</div>
<div class="line"><a name="l00185"></a><span class="lineno"> 185</span>  }</div>
<div class="line"><a name="l00186"></a><span class="lineno"> 186</span>  <span class="keywordflow">return</span> <span class="keyword">true</span>;</div>
<div class="line"><a name="l00187"></a><span class="lineno"> 187</span>  }</div>
<div class="line"><a name="l00188"></a><span class="lineno"> 188</span> </div>
<div class="line"><a name="l00189"></a><span class="lineno"><a class="code" href="class_ilwis_1_1_box.html#a3da68f18ac3a077d737db00348fca990"> 189</a></span>  <span class="keywordtype">bool</span> <a class="code" href="class_ilwis_1_1_box.html#a3da68f18ac3a077d737db00348fca990">isValid</a>()<span class="keyword"> const </span>{</div>
<div class="line"><a name="l00190"></a><span class="lineno"> 190</span>  <span class="keywordflow">return</span> this->min_corner().isValid() && this->max_corner().isValid();</div>
<div class="line"><a name="l00191"></a><span class="lineno"> 191</span>  }</div>
<div class="line"><a name="l00192"></a><span class="lineno"> 192</span> </div>
<div class="line"><a name="l00193"></a><span class="lineno"> 193</span>  <span class="keywordtype">bool</span> isNull()<span class="keyword"> const </span>{</div>
<div class="line"><a name="l00194"></a><span class="lineno"> 194</span>  <span class="keywordtype">bool</span> ok = this->min_corner().x == 0 && this->min_corner().y == 0 &&</div>
<div class="line"><a name="l00195"></a><span class="lineno"> 195</span>  this->max_corner().x == 0 && this->max_corner().y == 0;</div>
<div class="line"><a name="l00196"></a><span class="lineno"> 196</span>  <span class="keywordflow">if</span> ( is3D()){</div>
<div class="line"><a name="l00197"></a><span class="lineno"> 197</span>  ok &= this->min_corner().z == 0 && this->max_corner().z == 0;</div>
<div class="line"><a name="l00198"></a><span class="lineno"> 198</span>  }</div>
<div class="line"><a name="l00199"></a><span class="lineno"> 199</span>  <span class="keywordflow">return</span> ok;</div>
<div class="line"><a name="l00200"></a><span class="lineno"> 200</span>  }</div>
<div class="line"><a name="l00201"></a><span class="lineno"> 201</span> </div>
<div class="line"><a name="l00202"></a><span class="lineno"> 202</span>  Box<PointType>& operator=(Box<PointType>&& box) {</div>
<div class="line"><a name="l00203"></a><span class="lineno"> 203</span>  _min_corner = std::move(box._min_corner);</div>
<div class="line"><a name="l00204"></a><span class="lineno"> 204</span>  _max_corner = std::move(box._max_corner);</div>
<div class="line"><a name="l00205"></a><span class="lineno"> 205</span> </div>
<div class="line"><a name="l00206"></a><span class="lineno"> 206</span>  box._min_corner = box._max_corner = PointType();</div>
<div class="line"><a name="l00207"></a><span class="lineno"> 207</span>  <span class="keywordflow">return</span> *<span class="keyword">this</span>;</div>
<div class="line"><a name="l00208"></a><span class="lineno"> 208</span>  }</div>
<div class="line"><a name="l00209"></a><span class="lineno"> 209</span> </div>
<div class="line"><a name="l00210"></a><span class="lineno"> 210</span>  Box<PointType>& operator=(<span class="keyword">const</span> Box<PointType>& box) {</div>
<div class="line"><a name="l00211"></a><span class="lineno"> 211</span>  _min_corner = std::move(box._min_corner);</div>
<div class="line"><a name="l00212"></a><span class="lineno"> 212</span>  _max_corner = std::move(box._max_corner);</div>
<div class="line"><a name="l00213"></a><span class="lineno"> 213</span>  <span class="keywordflow">return</span> *<span class="keyword">this</span>;</div>
<div class="line"><a name="l00214"></a><span class="lineno"> 214</span>  }</div>
<div class="line"><a name="l00215"></a><span class="lineno"> 215</span> </div>
<div class="line"><a name="l00216"></a><span class="lineno"> 216</span>  Box<PointType>& operator +=(<span class="keyword">const</span> <span class="keywordtype">double</span>& v) {</div>
<div class="line"><a name="l00217"></a><span class="lineno"> 217</span>  <span class="keywordflow">if</span> ( isNumericalUndef(v))</div>
<div class="line"><a name="l00218"></a><span class="lineno"> 218</span>  <span class="keywordflow">return</span> *<span class="keyword">this</span>;</div>
<div class="line"><a name="l00219"></a><span class="lineno"> 219</span> </div>
<div class="line"><a name="l00220"></a><span class="lineno"> 220</span>  PointType& pmin = this->min_corner();</div>
<div class="line"><a name="l00221"></a><span class="lineno"> 221</span>  PointType& pmax = this->max_corner();</div>
<div class="line"><a name="l00222"></a><span class="lineno"> 222</span>  pmin -= v;</div>
<div class="line"><a name="l00223"></a><span class="lineno"> 223</span>  pmax += v;</div>
<div class="line"><a name="l00224"></a><span class="lineno"> 224</span>  normalize();</div>
<div class="line"><a name="l00225"></a><span class="lineno"> 225</span>  }</div>
<div class="line"><a name="l00226"></a><span class="lineno"> 226</span> </div>
<div class="line"><a name="l00227"></a><span class="lineno"> 227</span>  Box<PointType>& operator *=(<span class="keyword">const</span> <span class="keywordtype">double</span>& v) {</div>
<div class="line"><a name="l00228"></a><span class="lineno"> 228</span>  <span class="keywordflow">if</span> ( isNumericalUndef(v))</div>
<div class="line"><a name="l00229"></a><span class="lineno"> 229</span>  <span class="keywordflow">return</span> *<span class="keyword">this</span>;</div>
<div class="line"><a name="l00230"></a><span class="lineno"> 230</span>  PointType& pmin = this->min_corner();</div>
<div class="line"><a name="l00231"></a><span class="lineno"> 231</span>  PointType& pmax = this->max_corner();</div>
<div class="line"><a name="l00232"></a><span class="lineno"> 232</span>  <span class="keywordtype">double</span> deltaX = xlength() * v / 2;</div>
<div class="line"><a name="l00233"></a><span class="lineno"> 233</span>  <span class="keywordtype">double</span> deltaY = ylength() * v / 2;</div>
<div class="line"><a name="l00234"></a><span class="lineno"> 234</span>  <span class="keywordtype">double</span> deltaZ = 1;</div>
<div class="line"><a name="l00235"></a><span class="lineno"> 235</span>  <span class="keywordflow">if</span> ( is3D())</div>
<div class="line"><a name="l00236"></a><span class="lineno"> 236</span>  deltaZ = zlength() * v / 2;</div>
<div class="line"><a name="l00237"></a><span class="lineno"> 237</span>  pmin *= {deltaX, deltaY, deltaZ};</div>
<div class="line"><a name="l00238"></a><span class="lineno"> 238</span>  pmax *= {deltaX, deltaY, deltaZ};</div>
<div class="line"><a name="l00239"></a><span class="lineno"> 239</span>  normalize();</div>
<div class="line"><a name="l00240"></a><span class="lineno"> 240</span>  }</div>
<div class="line"><a name="l00241"></a><span class="lineno"> 241</span> </div>
<div class="line"><a name="l00242"></a><span class="lineno"> 242</span> Box<PointType>& operator +=(<span class="keyword">const</span> PointType& pnew) {</div>
<div class="line"><a name="l00243"></a><span class="lineno"> 243</span>  <span class="keywordflow">if</span> ( !pnew.isValid())</div>
<div class="line"><a name="l00244"></a><span class="lineno"> 244</span>  <span class="keywordflow">return</span> *<span class="keyword">this</span>;</div>
<div class="line"><a name="l00245"></a><span class="lineno"> 245</span> </div>
<div class="line"><a name="l00246"></a><span class="lineno"> 246</span> </div>
<div class="line"><a name="l00247"></a><span class="lineno"> 247</span> </div>
<div class="line"><a name="l00248"></a><span class="lineno"> 248</span>  PointType& pmin = this->min_corner();</div>
<div class="line"><a name="l00249"></a><span class="lineno"> 249</span>  PointType& pmax = this->max_corner();</div>
<div class="line"><a name="l00250"></a><span class="lineno"> 250</span>  <span class="keywordflow">if</span> ( isNull() || !<a class="code" href="class_ilwis_1_1_box.html#a3da68f18ac3a077d737db00348fca990">isValid</a>()) {</div>
<div class="line"><a name="l00251"></a><span class="lineno"> 251</span>  pmin = pnew;</div>
<div class="line"><a name="l00252"></a><span class="lineno"> 252</span>  pmax = pnew;</div>
<div class="line"><a name="l00253"></a><span class="lineno"> 253</span>  <span class="keywordflow">return</span> *<span class="keyword">this</span>;</div>
<div class="line"><a name="l00254"></a><span class="lineno"> 254</span>  }</div>
<div class="line"><a name="l00255"></a><span class="lineno"> 255</span> </div>
<div class="line"><a name="l00256"></a><span class="lineno"> 256</span>  <span class="keywordflow">if</span> ( contains(pnew))</div>
<div class="line"><a name="l00257"></a><span class="lineno"> 257</span>  <span class="keywordflow">return</span> *<span class="keyword">this</span>;</div>
<div class="line"><a name="l00258"></a><span class="lineno"> 258</span>  <span class="keywordflow">if</span> ( pmin.x > pnew.x)</div>
<div class="line"><a name="l00259"></a><span class="lineno"> 259</span>  pmin.x = pnew.x;</div>
<div class="line"><a name="l00260"></a><span class="lineno"> 260</span>  <span class="keywordflow">if</span> ( pmin.y > pnew.y)</div>
<div class="line"><a name="l00261"></a><span class="lineno"> 261</span>  pmin.y = pnew.y;</div>
<div class="line"><a name="l00262"></a><span class="lineno"> 262</span>  <span class="keywordflow">if</span> ( pmax.x < pnew.x)</div>
<div class="line"><a name="l00263"></a><span class="lineno"> 263</span>  pmax.x = pnew.x;</div>
<div class="line"><a name="l00264"></a><span class="lineno"> 264</span>  <span class="keywordflow">if</span> ( pmax.y < pnew.y)</div>
<div class="line"><a name="l00265"></a><span class="lineno"> 265</span>  pmax.y = pnew.y;</div>
<div class="line"><a name="l00266"></a><span class="lineno"> 266</span>  <span class="keywordflow">if</span> ( is3D() && pnew.is3D()){</div>
<div class="line"><a name="l00267"></a><span class="lineno"> 267</span>  <span class="keywordflow">if</span> ( pmin.z > pnew.z)</div>
<div class="line"><a name="l00268"></a><span class="lineno"> 268</span>  pmin.z = pnew.z;</div>
<div class="line"><a name="l00269"></a><span class="lineno"> 269</span>  <span class="keywordflow">if</span> ( pmax.z < pnew.z)</div>
<div class="line"><a name="l00270"></a><span class="lineno"> 270</span>  pmax.z = pnew.z;</div>
<div class="line"><a name="l00271"></a><span class="lineno"> 271</span>  }</div>
<div class="line"><a name="l00272"></a><span class="lineno"> 272</span>  normalize();</div>
<div class="line"><a name="l00273"></a><span class="lineno"> 273</span> </div>
<div class="line"><a name="l00274"></a><span class="lineno"> 274</span>  <span class="keywordflow">return</span> *<span class="keyword">this</span>;</div>
<div class="line"><a name="l00275"></a><span class="lineno"> 275</span> </div>
<div class="line"><a name="l00276"></a><span class="lineno"> 276</span> }</div>
<div class="line"><a name="l00277"></a><span class="lineno"> 277</span> </div>
<div class="line"><a name="l00278"></a><span class="lineno"> 278</span> Box<PointType>& operator -=(<span class="keyword">const</span> PointType& pnew) {</div>
<div class="line"><a name="l00279"></a><span class="lineno"> 279</span>  <span class="keywordflow">if</span> ( !pnew.isValid())</div>
<div class="line"><a name="l00280"></a><span class="lineno"> 280</span>  <span class="keywordflow">return</span> *<span class="keyword">this</span>;</div>
<div class="line"><a name="l00281"></a><span class="lineno"> 281</span> </div>
<div class="line"><a name="l00282"></a><span class="lineno"> 282</span>  PointType& pmin = this->min_corner();</div>
<div class="line"><a name="l00283"></a><span class="lineno"> 283</span>  PointType& pmax = this->max_corner();</div>
<div class="line"><a name="l00284"></a><span class="lineno"> 284</span> </div>
<div class="line"><a name="l00285"></a><span class="lineno"> 285</span>  <span class="keywordflow">if</span> ( isNull() || !<a class="code" href="class_ilwis_1_1_box.html#a3da68f18ac3a077d737db00348fca990">isValid</a>()) {</div>
<div class="line"><a name="l00286"></a><span class="lineno"> 286</span>  pmin = pnew;</div>
<div class="line"><a name="l00287"></a><span class="lineno"> 287</span>  pmax = pnew;</div>
<div class="line"><a name="l00288"></a><span class="lineno"> 288</span>  <span class="keywordflow">return</span> *<span class="keyword">this</span>;</div>
<div class="line"><a name="l00289"></a><span class="lineno"> 289</span>  }</div>
<div class="line"><a name="l00290"></a><span class="lineno"> 290</span> </div>
<div class="line"><a name="l00291"></a><span class="lineno"> 291</span>  <span class="keywordflow">if</span> ( !contains(pnew))</div>
<div class="line"><a name="l00292"></a><span class="lineno"> 292</span>  <span class="keywordflow">return</span> *<span class="keyword">this</span>;</div>
<div class="line"><a name="l00293"></a><span class="lineno"> 293</span>  <span class="keywordflow">if</span> ( pmin.x() < pnew.x())</div>
<div class="line"><a name="l00294"></a><span class="lineno"> 294</span>  pmin.x = pnew.x();</div>
<div class="line"><a name="l00295"></a><span class="lineno"> 295</span>  <span class="keywordflow">if</span> ( pmin.y < pnew.y)</div>
<div class="line"><a name="l00296"></a><span class="lineno"> 296</span>  pmin.y = pnew.y();</div>
<div class="line"><a name="l00297"></a><span class="lineno"> 297</span>  <span class="keywordflow">if</span> ( pmax.x > pnew.x)</div>
<div class="line"><a name="l00298"></a><span class="lineno"> 298</span>  pmax.x = pnew.x();</div>
<div class="line"><a name="l00299"></a><span class="lineno"> 299</span>  <span class="keywordflow">if</span> ( pmax.y > pnew.y)</div>
<div class="line"><a name="l00300"></a><span class="lineno"> 300</span>  pmax.y = pnew.y();</div>
<div class="line"><a name="l00301"></a><span class="lineno"> 301</span>  <span class="keywordflow">if</span> ( is3D() && pnew.is3D()){</div>
<div class="line"><a name="l00302"></a><span class="lineno"> 302</span>  <span class="keywordflow">if</span> ( pmin.z < pnew.z)</div>
<div class="line"><a name="l00303"></a><span class="lineno"> 303</span>  pmin.z = pnew.z;</div>
<div class="line"><a name="l00304"></a><span class="lineno"> 304</span>  <span class="keywordflow">if</span> ( pmax.z > pnew.z)</div>
<div class="line"><a name="l00305"></a><span class="lineno"> 305</span>  pmax.z = pnew.z;</div>
<div class="line"><a name="l00306"></a><span class="lineno"> 306</span>  }</div>
<div class="line"><a name="l00307"></a><span class="lineno"> 307</span>  normalize();</div>
<div class="line"><a name="l00308"></a><span class="lineno"> 308</span> </div>
<div class="line"><a name="l00309"></a><span class="lineno"> 309</span>  <span class="keywordflow">return</span> *<span class="keyword">this</span>;</div>
<div class="line"><a name="l00310"></a><span class="lineno"> 310</span> </div>
<div class="line"><a name="l00311"></a><span class="lineno"> 311</span> }</div>
<div class="line"><a name="l00312"></a><span class="lineno"> 312</span> </div>
<div class="line"><a name="l00313"></a><span class="lineno"> 313</span> <span class="keyword">template</span><<span class="keyword">class</span> T> Box<PointType>& operator +=(<span class="keyword">const</span> std::vector<T>& vec) {</div>
<div class="line"><a name="l00314"></a><span class="lineno"> 314</span>  <span class="keywordtype">int</span> size = vec.size();</div>
<div class="line"><a name="l00315"></a><span class="lineno"> 315</span>  <span class="keywordflow">if</span> ( size == 2 || size == 3) {</div>
<div class="line"><a name="l00316"></a><span class="lineno"> 316</span>  this->min_corner() += vec;</div>
<div class="line"><a name="l00317"></a><span class="lineno"> 317</span>  this->max_corner() += vec;</div>
<div class="line"><a name="l00318"></a><span class="lineno"> 318</span>  normalize();</div>
<div class="line"><a name="l00319"></a><span class="lineno"> 319</span>  }</div>
<div class="line"><a name="l00320"></a><span class="lineno"> 320</span> </div>
<div class="line"><a name="l00321"></a><span class="lineno"> 321</span>  <span class="keywordflow">return</span> *<span class="keyword">this</span>;</div>
<div class="line"><a name="l00322"></a><span class="lineno"> 322</span> }</div>
<div class="line"><a name="l00323"></a><span class="lineno"> 323</span> </div>
<div class="line"><a name="l00324"></a><span class="lineno"> 324</span> Box<PointType>& operator +=(<span class="keyword">const</span> Box<PointType>& box) {</div>
<div class="line"><a name="l00325"></a><span class="lineno"> 325</span>  <span class="keywordflow">if</span> ( !box.isValid())</div>
<div class="line"><a name="l00326"></a><span class="lineno"> 326</span>  <span class="keywordflow">return</span> *<span class="keyword">this</span>;</div>
<div class="line"><a name="l00327"></a><span class="lineno"> 327</span> </div>
<div class="line"><a name="l00328"></a><span class="lineno"> 328</span>  operator+=(box.min_corner());</div>
<div class="line"><a name="l00329"></a><span class="lineno"> 329</span>  operator+=(box.max_corner());</div>
<div class="line"><a name="l00330"></a><span class="lineno"> 330</span>  <span class="keywordflow">return</span> *<span class="keyword">this</span>;</div>
<div class="line"><a name="l00331"></a><span class="lineno"> 331</span> }</div>
<div class="line"><a name="l00332"></a><span class="lineno"> 332</span> </div>
<div class="line"><a name="l00333"></a><span class="lineno"> 333</span> <span class="keywordtype">bool</span> operator==(<span class="keyword">const</span> Box<PointType>& box )<span class="keyword"> const </span>{</div>
<div class="line"><a name="l00334"></a><span class="lineno"> 334</span>  <span class="keywordflow">if</span> ( !box.isValid())</div>
<div class="line"><a name="l00335"></a><span class="lineno"> 335</span>  <span class="keywordflow">return</span> <span class="keyword">false</span>;</div>
<div class="line"><a name="l00336"></a><span class="lineno"> 336</span> </div>
<div class="line"><a name="l00337"></a><span class="lineno"> 337</span>  <span class="keywordflow">return</span> box.max_corner() == this->max_corner() && this->min_corner() == box.min_corner();</div>
<div class="line"><a name="l00338"></a><span class="lineno"> 338</span> }</div>
<div class="line"><a name="l00339"></a><span class="lineno"> 339</span> </div>
<div class="line"><a name="l00340"></a><span class="lineno"> 340</span> <span class="keywordtype">bool</span> operator!=(<span class="keyword">const</span> Box<PointType>& box )<span class="keyword"> const </span>{</div>
<div class="line"><a name="l00341"></a><span class="lineno"> 341</span>  <span class="keywordflow">return</span> !(operator==(box));</div>
<div class="line"><a name="l00342"></a><span class="lineno"> 342</span> }</div>
<div class="line"><a name="l00343"></a><span class="lineno"> 343</span> </div>
<div class="line"><a name="l00344"></a><span class="lineno"> 344</span> QVariant impliedValue(<span class="keyword">const</span> QVariant& v)<span class="keyword"> const</span>{</div>
<div class="line"><a name="l00345"></a><span class="lineno"> 345</span>  QString type = v.typeName();</div>
<div class="line"><a name="l00346"></a><span class="lineno"> 346</span>  <span class="keywordtype">bool</span> ok = type == <span class="stringliteral">"Ilwis::Box<Pixel>"</span> || type == <span class="stringliteral">"Ilwis::Box<Coordinate>"</span> ||</div>
<div class="line"><a name="l00347"></a><span class="lineno"> 347</span>  type == <span class="stringliteral">"Ilwis::Box<Pixeld>"</span> ;</div>
<div class="line"><a name="l00348"></a><span class="lineno"> 348</span>  <span class="keywordflow">if</span> (!ok){</div>
<div class="line"><a name="l00349"></a><span class="lineno"> 349</span>  <span class="keywordflow">return</span> sUNDEF;</div>
<div class="line"><a name="l00350"></a><span class="lineno"> 350</span>  }</div>
<div class="line"><a name="l00351"></a><span class="lineno"> 351</span>  <span class="keywordflow">if</span> ( type == <span class="stringliteral">"Ilwis::Box<Coordinate>"</span>){</div>
<div class="line"><a name="l00352"></a><span class="lineno"> 352</span>  Box<Coordinate> box = v.value<Box<Coordinate>>();</div>
<div class="line"><a name="l00353"></a><span class="lineno"> 353</span>  <span class="keywordflow">return</span> box.toString();</div>
<div class="line"><a name="l00354"></a><span class="lineno"> 354</span>  }</div>
<div class="line"><a name="l00355"></a><span class="lineno"> 355</span>  <span class="keywordflow">if</span> ( type == <span class="stringliteral">"Ilwis::Box<Pixel>"</span>){</div>
<div class="line"><a name="l00356"></a><span class="lineno"> 356</span>  Box<Pixel> box = v.value<Box<Pixel>>();</div>
<div class="line"><a name="l00357"></a><span class="lineno"> 357</span>  <span class="keywordflow">return</span> box.toString();</div>
<div class="line"><a name="l00358"></a><span class="lineno"> 358</span>  }</div>
<div class="line"><a name="l00359"></a><span class="lineno"> 359</span>  <span class="keywordflow">if</span> ( type == <span class="stringliteral">"Ilwis::Box<Pixeld>"</span>){</div>
<div class="line"><a name="l00360"></a><span class="lineno"> 360</span>  Box<Pixeld> box = v.value<Box<Pixeld>>();</div>
<div class="line"><a name="l00361"></a><span class="lineno"> 361</span>  <span class="keywordflow">return</span> box.toString();</div>
<div class="line"><a name="l00362"></a><span class="lineno"> 362</span>  }</div>
<div class="line"><a name="l00363"></a><span class="lineno"> 363</span>  <span class="keywordflow">return</span> sUNDEF;</div>
<div class="line"><a name="l00364"></a><span class="lineno"> 364</span> </div>
<div class="line"><a name="l00365"></a><span class="lineno"> 365</span> }</div>
<div class="line"><a name="l00366"></a><span class="lineno"> 366</span> </div>
<div class="line"><a name="l00367"></a><span class="lineno"> 367</span> <span class="keyword">template</span><<span class="keyword">typename</span> T> <span class="keywordtype">void</span> ensure(<span class="keyword">const</span> Size<T>& sz) {</div>
<div class="line"><a name="l00368"></a><span class="lineno"> 368</span>  <span class="keywordflow">if</span> ( xlength() > sz.xsize()) {</div>
<div class="line"><a name="l00369"></a><span class="lineno"> 369</span>  this->max_corner().x = sz.xsize() - 1 ;</div>
<div class="line"><a name="l00370"></a><span class="lineno"> 370</span>  }</div>
<div class="line"><a name="l00371"></a><span class="lineno"> 371</span>  <span class="keywordflow">if</span> ( ylength() > sz.ysize()) {</div>
<div class="line"><a name="l00372"></a><span class="lineno"> 372</span>  this->max_corner().y = sz.ysize() - 1 ;</div>
<div class="line"><a name="l00373"></a><span class="lineno"> 373</span>  }</div>
<div class="line"><a name="l00374"></a><span class="lineno"> 374</span>  <span class="keywordflow">if</span> ( zlength() > sz.zsize()) {</div>
<div class="line"><a name="l00375"></a><span class="lineno"> 375</span>  this->max_corner().z = sz.zsize() - 1 ;</div>
<div class="line"><a name="l00376"></a><span class="lineno"> 376</span>  }</div>
<div class="line"><a name="l00377"></a><span class="lineno"> 377</span> }</div>
<div class="line"><a name="l00378"></a><span class="lineno"> 378</span> </div>
<div class="line"><a name="l00379"></a><span class="lineno"> 379</span> <span class="keywordtype">void</span> copyFrom(<span class="keyword">const</span> Box<PointType>& box, quint32 dimensions=dimX | dimY | dimZ) {</div>
<div class="line"><a name="l00380"></a><span class="lineno"> 380</span>  <span class="keywordflow">if</span> ( dimensions & dimX) {</div>
<div class="line"><a name="l00381"></a><span class="lineno"> 381</span>  this->min_corner().x = box.min_corner().x;</div>
<div class="line"><a name="l00382"></a><span class="lineno"> 382</span>  this->max_corner().x =box.max_corner().x;</div>
<div class="line"><a name="l00383"></a><span class="lineno"> 383</span>  }</div>
<div class="line"><a name="l00384"></a><span class="lineno"> 384</span>  <span class="keywordflow">if</span> ( dimensions & dimY) {</div>
<div class="line"><a name="l00385"></a><span class="lineno"> 385</span>  this->min_corner().y = box.min_corner().y;</div>
<div class="line"><a name="l00386"></a><span class="lineno"> 386</span>  this->max_corner().y = box.max_corner().y;</div>
<div class="line"><a name="l00387"></a><span class="lineno"> 387</span>  }</div>
<div class="line"><a name="l00388"></a><span class="lineno"> 388</span>  <span class="keywordflow">if</span> ( dimensions & dimZ) {</div>
<div class="line"><a name="l00389"></a><span class="lineno"> 389</span>  this->min_corner().z = box.min_corner().z;</div>
<div class="line"><a name="l00390"></a><span class="lineno"> 390</span>  this->max_corner().z = box.max_corner().z;</div>
<div class="line"><a name="l00391"></a><span class="lineno"> 391</span>  }</div>
<div class="line"><a name="l00392"></a><span class="lineno"> 392</span> }</div>
<div class="line"><a name="l00393"></a><span class="lineno"> 393</span> </div>
<div class="line"><a name="l00394"></a><span class="lineno"> 394</span> </div>
<div class="line"><a name="l00395"></a><span class="lineno"><a class="code" href="class_ilwis_1_1_box.html#ab1b9531b3c86db9a4d373ac53c4f910b"> 395</a></span> QString <a class="code" href="class_ilwis_1_1_box.html#ab1b9531b3c86db9a4d373ac53c4f910b">toString</a>()<span class="keyword"> const </span>{</div>
<div class="line"><a name="l00396"></a><span class="lineno"> 396</span>  <span class="keywordflow">if</span> ( is3D()) {</div>
<div class="line"><a name="l00397"></a><span class="lineno"> 397</span>  <span class="keywordflow">if</span> (this->min_corner().valuetype() == itDOUBLE)</div>
<div class="line"><a name="l00398"></a><span class="lineno"> 398</span>  <span class="keywordflow">return</span> QString(<span class="stringliteral">"POLYGON(%1 %2 %3,%4 %5 %6)"</span>).</div>
<div class="line"><a name="l00399"></a><span class="lineno"> 399</span>  arg((<span class="keywordtype">double</span>)this->min_corner().x,0,<span class="charliteral">'g'</span>).</div>
<div class="line"><a name="l00400"></a><span class="lineno"> 400</span>  arg((<span class="keywordtype">double</span>)this->min_corner().y,0,<span class="charliteral">'g'</span>).</div>
<div class="line"><a name="l00401"></a><span class="lineno"> 401</span>  arg((<span class="keywordtype">double</span>)this->min_corner().z,0,<span class="charliteral">'g'</span>).</div>
<div class="line"><a name="l00402"></a><span class="lineno"> 402</span>  arg((<span class="keywordtype">double</span>)this->max_corner().x,0,<span class="charliteral">'g'</span>).</div>
<div class="line"><a name="l00403"></a><span class="lineno"> 403</span>  arg((<span class="keywordtype">double</span>)this->max_corner().y,0,<span class="charliteral">'g'</span>).</div>
<div class="line"><a name="l00404"></a><span class="lineno"> 404</span>  arg((<span class="keywordtype">double</span>)this->max_corner().z,0,<span class="charliteral">'g'</span>);</div>
<div class="line"><a name="l00405"></a><span class="lineno"> 405</span>  <span class="keywordflow">else</span></div>
<div class="line"><a name="l00406"></a><span class="lineno"> 406</span>  <span class="keywordflow">return</span> QString(<span class="stringliteral">"POLYGON(%1 %2 %3,%4 %5 %6)"</span>).arg(this->min_corner().x).</div>
<div class="line"><a name="l00407"></a><span class="lineno"> 407</span>  arg(this->min_corner().y).</div>
<div class="line"><a name="l00408"></a><span class="lineno"> 408</span>  arg(this->min_corner().z).</div>
<div class="line"><a name="l00409"></a><span class="lineno"> 409</span>  arg(this->max_corner().x).</div>
<div class="line"><a name="l00410"></a><span class="lineno"> 410</span>  arg(this->max_corner().y).</div>
<div class="line"><a name="l00411"></a><span class="lineno"> 411</span>  arg(this->max_corner().z);</div>
<div class="line"><a name="l00412"></a><span class="lineno"> 412</span> </div>
<div class="line"><a name="l00413"></a><span class="lineno"> 413</span> </div>
<div class="line"><a name="l00414"></a><span class="lineno"> 414</span>  }<span class="keywordflow">else</span> {</div>
<div class="line"><a name="l00415"></a><span class="lineno"> 415</span>  <span class="keywordflow">if</span> (this->min_corner().valuetype() == itDOUBLE)</div>
<div class="line"><a name="l00416"></a><span class="lineno"> 416</span>  <span class="keywordflow">return</span> QString(<span class="stringliteral">"POLYGON(%1 %2,%3 %4)"</span>).</div>
<div class="line"><a name="l00417"></a><span class="lineno"> 417</span>  arg((<span class="keywordtype">double</span>)this->min_corner().x,0,<span class="charliteral">'g'</span>).</div>
<div class="line"><a name="l00418"></a><span class="lineno"> 418</span>  arg((<span class="keywordtype">double</span>)this->min_corner().y,0,<span class="charliteral">'g'</span>).</div>
<div class="line"><a name="l00419"></a><span class="lineno"> 419</span>  arg((<span class="keywordtype">double</span>)this->max_corner().x,0,<span class="charliteral">'g'</span>).</div>
<div class="line"><a name="l00420"></a><span class="lineno"> 420</span>  arg((<span class="keywordtype">double</span>)this->max_corner().y,0,<span class="charliteral">'g'</span>);</div>
<div class="line"><a name="l00421"></a><span class="lineno"> 421</span>  <span class="keywordflow">else</span></div>
<div class="line"><a name="l00422"></a><span class="lineno"> 422</span>  <span class="keywordflow">return</span> QString(<span class="stringliteral">"POLYGON(%1 %2,%3 %4)"</span>).</div>
<div class="line"><a name="l00423"></a><span class="lineno"> 423</span>  arg(this->min_corner().x).</div>
<div class="line"><a name="l00424"></a><span class="lineno"> 424</span>  arg(this->min_corner().y).</div>
<div class="line"><a name="l00425"></a><span class="lineno"> 425</span>  arg(this->max_corner().x).</div>
<div class="line"><a name="l00426"></a><span class="lineno"> 426</span>  arg(this->max_corner().y);</div>
<div class="line"><a name="l00427"></a><span class="lineno"> 427</span>  }</div>
<div class="line"><a name="l00428"></a><span class="lineno"> 428</span> </div>
<div class="line"><a name="l00429"></a><span class="lineno"> 429</span> }</div>
<div class="line"><a name="l00430"></a><span class="lineno"> 430</span> </div>
<div class="line"><a name="l00431"></a><span class="lineno"> 431</span> <span class="keyword">private</span>:</div>
<div class="line"><a name="l00432"></a><span class="lineno"> 432</span>  PointType _min_corner;</div>
<div class="line"><a name="l00433"></a><span class="lineno"> 433</span>  PointType _max_corner;</div>
<div class="line"><a name="l00434"></a><span class="lineno"> 434</span> </div>
<div class="line"><a name="l00435"></a><span class="lineno"> 435</span> </div>
<div class="line"><a name="l00436"></a><span class="lineno"> 436</span> <span class="keywordtype">void</span> normalize() {</div>
<div class="line"><a name="l00437"></a><span class="lineno"> 437</span>  PointType& pmin = this->min_corner();</div>
<div class="line"><a name="l00438"></a><span class="lineno"> 438</span>  PointType& pmax = this->max_corner();</div>
<div class="line"><a name="l00439"></a><span class="lineno"> 439</span>  <span class="keywordflow">if</span> ( pmin.x > pmax.x) {</div>
<div class="line"><a name="l00440"></a><span class="lineno"> 440</span>  <span class="keywordtype">double</span> v1 = pmin.x;</div>
<div class="line"><a name="l00441"></a><span class="lineno"> 441</span>  <span class="keywordtype">double</span> v2 = pmax.x;</div>
<div class="line"><a name="l00442"></a><span class="lineno"> 442</span>  std::swap(v1, v2);</div>
<div class="line"><a name="l00443"></a><span class="lineno"> 443</span>  pmin.x = v1;</div>
<div class="line"><a name="l00444"></a><span class="lineno"> 444</span>  pmax.x = v2;</div>
<div class="line"><a name="l00445"></a><span class="lineno"> 445</span> </div>
<div class="line"><a name="l00446"></a><span class="lineno"> 446</span>  }</div>
<div class="line"><a name="l00447"></a><span class="lineno"> 447</span>  <span class="keywordflow">if</span> ( pmin.y > pmax.y) {</div>
<div class="line"><a name="l00448"></a><span class="lineno"> 448</span>  <span class="keywordtype">double</span> v1 = pmin.y;</div>
<div class="line"><a name="l00449"></a><span class="lineno"> 449</span>  <span class="keywordtype">double</span> v2 = pmax.y;</div>
<div class="line"><a name="l00450"></a><span class="lineno"> 450</span>  std::swap(v1, v2);</div>
<div class="line"><a name="l00451"></a><span class="lineno"> 451</span>  pmin.y = v1;</div>
<div class="line"><a name="l00452"></a><span class="lineno"> 452</span>  pmax.y = v2;</div>
<div class="line"><a name="l00453"></a><span class="lineno"> 453</span>  }</div>
<div class="line"><a name="l00454"></a><span class="lineno"> 454</span>  <span class="keywordflow">if</span> ( pmin.z > pmax.z) {</div>
<div class="line"><a name="l00455"></a><span class="lineno"> 455</span>  <span class="keywordtype">double</span> v1 = pmin.z;</div>
<div class="line"><a name="l00456"></a><span class="lineno"> 456</span>  <span class="keywordtype">double</span> v2 = pmax.z;</div>
<div class="line"><a name="l00457"></a><span class="lineno"> 457</span>  std::swap(v1, v2);</div>
<div class="line"><a name="l00458"></a><span class="lineno"> 458</span>  pmin.z = v1;</div>
<div class="line"><a name="l00459"></a><span class="lineno"> 459</span>  pmax.z = v2;</div>
<div class="line"><a name="l00460"></a><span class="lineno"> 460</span>  }</div>
<div class="line"><a name="l00461"></a><span class="lineno"> 461</span> </div>
<div class="line"><a name="l00462"></a><span class="lineno"> 462</span> }</div>
<div class="line"><a name="l00463"></a><span class="lineno"> 463</span> </div>
<div class="line"><a name="l00464"></a><span class="lineno"> 464</span> </div>
<div class="line"><a name="l00465"></a><span class="lineno"> 465</span> };</div>
<div class="line"><a name="l00466"></a><span class="lineno"> 466</span> </div>
<div class="line"><a name="l00467"></a><span class="lineno"> 467</span> <span class="keyword">template</span><<span class="keyword">typename</span> Po<span class="keywordtype">int</span>Type> Box<PointType> operator *(<span class="keyword">const</span> Box<PointType>& box, <span class="keyword">const</span> <span class="keywordtype">double</span>& v) {</div>
<div class="line"><a name="l00468"></a><span class="lineno"> 468</span>  PointType pmin = box.min_corner();</div>
<div class="line"><a name="l00469"></a><span class="lineno"> 469</span>  PointType pmax = box.max_corner();</div>
<div class="line"><a name="l00470"></a><span class="lineno"> 470</span>  <span class="keywordtype">double</span> deltaX = box.xlength() * v / 2;</div>
<div class="line"><a name="l00471"></a><span class="lineno"> 471</span>  <span class="keywordtype">double</span> deltaY = box.ylength() * v / 2;</div>
<div class="line"><a name="l00472"></a><span class="lineno"> 472</span>  <span class="keywordtype">double</span> deltaZ = box.is3d() ? box.zlength() * v / 2 : 0;</div>
<div class="line"><a name="l00473"></a><span class="lineno"> 473</span>  pmin -= {deltaX, deltaY, deltaZ};</div>
<div class="line"><a name="l00474"></a><span class="lineno"> 474</span>  pmax += {deltaX, deltaY, deltaZ};</div>
<div class="line"><a name="l00475"></a><span class="lineno"> 475</span>  <span class="keywordflow">return</span> Box<PointType>(pmin, pmax);</div>
<div class="line"><a name="l00476"></a><span class="lineno"> 476</span> }</div>
<div class="line"><a name="l00477"></a><span class="lineno"> 477</span> </div>
<div class="line"><a name="l00478"></a><span class="lineno"> 478</span> <span class="keyword">typedef</span> <a class="code" href="class_ilwis_1_1_box.html">Ilwis::Box<Ilwis::Pixel></a> BoundingBox;</div>
<div class="line"><a name="l00479"></a><span class="lineno"> 479</span> <span class="keyword">typedef</span> <a class="code" href="class_ilwis_1_1_box.html">Ilwis::Box<Ilwis::Coordinate></a> Envelope;</div>
<div class="line"><a name="l00480"></a><span class="lineno"> 480</span> </div>
<div class="line"><a name="l00481"></a><span class="lineno"> 481</span> }</div>
<div class="line"><a name="l00482"></a><span class="lineno"> 482</span> </div>
<div class="line"><a name="l00483"></a><span class="lineno"> 483</span> </div>
<div class="line"><a name="l00484"></a><span class="lineno"> 484</span> Q_DECLARE_METATYPE(<a class="code" href="class_ilwis_1_1_box.html">Ilwis::BoundingBox</a>)</div>
<div class="line"><a name="l00485"></a><span class="lineno"> 485</span> Q_DECLARE_METATYPE(Ilwis::Box<Ilwis::Pixeld>)</div>
<div class="line"><a name="l00486"></a><span class="lineno"> 486</span> Q_DECLARE_METATYPE(Ilwis::Envelope)</div>
<div class="line"><a name="l00487"></a><span class="lineno"> 487</span> </div>
<div class="line"><a name="l00488"></a><span class="lineno"> 488</span> </div>
<div class="line"><a name="l00489"></a><span class="lineno"> 489</span> </div>
<div class="line"><a name="l00490"></a><span class="lineno"> 490</span> <span class="preprocessor">#endif // BOX_H</span></div>
| ridoo/IlwisCore | CPPAPI/box_8h_source.html | HTML | apache-2.0 | 71,383 |
package eu.dowsing.kolla.widget.brick.facade;
import javafx.scene.layout.Pane;
import javafx.scene.paint.Color;
import javafx.scene.shape.Circle;
import javafx.scene.shape.CircleBuilder;
import javafx.scene.shape.Rectangle;
import javafx.scene.shape.RectangleBuilder;
import com.leapmotion.leap.Hand;
import eu.dowsing.kolla.widget.brick.model.BrickModel;
import eu.dowsing.kolla.widget.brick.model.BrickModel.Position;
/**
* Represents a complete hand including its fingers.
*
* @author richardg
*
*/
public class BrickView {
// port(left hand:red) and starboard(right hand:green)
public enum Importance {
PRIMARY, SECONDARY
}
private Rectangle horizontal;
private Rectangle vertical;
private Rectangle[] fingerRects;
private Circle hint;
/** Hints at where the gesture started. **/
private Circle startHint;
public BrickView(Pane p, int rectHeight, int rectWidth, int rectX, int rectY, int miniRectHeight, int miniRectWidth) {
drawIndicator(p, rectHeight, rectWidth, rectX, rectY, miniRectHeight, miniRectWidth);
}
private void drawIndicator(Pane p, int hHeight, int hWidth, int rectX, int rectY, int mHeight, int mWidth) {
final int fingerCount = 5;
fingerRects = new Rectangle[fingerCount];
final int rectMargin = 10;
final int hRealWidth = hWidth - (2 * rectMargin);
// create the measure for the mini finger rectangles
int miniRectMargin = rectMargin / 2;
int mRealWidth = mWidth - miniRectMargin;
int mRectX = rectX + (miniRectMargin / 2);
int mRectY = rectY;
// create measures for the vertical rectangle
final int vWidth = hHeight;
final int vHeight = hWidth / 2;
// create the circle indicating where the hand can be
this.hint = CircleBuilder.create().radius(hHeight / 2).centerX(rectX + (hWidth / 2) - (hHeight / 2))
.centerY(rectY + (hHeight / 2)).fill(Color.web("grey", 0.1)).stroke(Color.BLACK).build();
p.getChildren().add(hint);
// create the circle indicating where the gesture started
this.startHint = CircleBuilder.create().radius(hHeight / 2).centerX(rectX + (hWidth / 2) - (hHeight / 2))
.centerY(rectY + (hHeight / 2)).fill(Color.web("grey", 0.1)).stroke(Color.BLACK).build();
p.getChildren().add(startHint);
// create the rectangle indicating position of the hand
horizontal = RectangleBuilder.create().height(hHeight).width(hRealWidth).arcHeight(0).arcWidth(0)
.stroke(Color.RED).fill(Color.web("blue", 0.1)).translateX(rectX).translateY(rectY).build();
p.getChildren().add(horizontal);
// create rectangle indicating if the hand is vertical
vertical = RectangleBuilder.create().height(vHeight).width(vWidth).arcHeight(0).arcWidth(0).stroke(Color.RED)
.fill(Color.web("blue", 0.1)).translateX(rectX + (vWidth / 2)).translateY(rectY - (vHeight / 2))
.build();
p.getChildren().add(vertical);
// now create the rectangles indicating fingers found
for (int i = 0; i < fingerRects.length; i++) {
Rectangle mini = RectangleBuilder.create().height(mHeight).width(mRealWidth).arcHeight(0).arcWidth(0)
.stroke(Color.GREEN).fill(Color.web("blue", 0.1)).translateX(mRectX + (i * mWidth))
.translateY(mRectY).build();
fingerRects[i] = mini;
p.getChildren().add(mini);
}
}
public Color getPitchColor(Hand h) {
double direction = Math.toDegrees(h.direction().pitch());
if (direction < 10 && direction > -10) {
return Color.web("blue", 0.1);
} else if (direction < 100 && direction > 80) {
return Color.web("green", 0.1);
} else if (direction < -80 && direction > -100) {
return Color.web("yellow", 0.1);
} else {
return Color.web("red", 0.1);
}
}
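// Summary of the mapping above: pitch ~0 degrees -> blue, ~90 -> green,
// ~-90 -> yellow, anything else -> red (all translucent, alpha 0.1).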
public Color getHandColor(Importance importance) {
// port(left hand/secondary:red) and starboard(right hand/primary:green)
if (importance == Importance.PRIMARY) {
return Color.web("green", 1);
} else if (importance == Importance.SECONDARY) {
return Color.web("red", 1);
} else {
return Color.web("yellow", 1);
}
}
public void setShowGestureStart(Importance importance) {
Color fill = getHandColor(importance);
this.startHint.setVisible(true);
this.startHint.setFill(fill);
}
/**
* Show the hand.
*
* @param importance whether this is the primary or the secondary hand
* @param pos whether the hand is positioned horizontally or vertically
* @param fingerCount the number of fingers currently detected
* @param handledGesture whether a gesture was just handled, indicated by a highlight color
*/
public void showHand(Importance importance, Position pos, int fingerCount, boolean handledGesture) {
// first make all rectangles visible
setVisible(true);
// hide vertical or horizontal position
Color fill = getHandColor(importance);
if (pos == Position.HORIZONTAL) {
vertical.setVisible(false);
} else if (pos == Position.VERTICAL) {
horizontal.setVisible(false);
}
// notify the user that the gesture was handled
if (handledGesture) {
fill = Color.web("yellow", 1);
}
// color the rectangles
horizontal.setFill(fill);
vertical.setFill(fill);
// then we hide invisible fingers
for (int i = fingerCount; i < fingerRects.length; i++) {
fingerRects[i].setVisible(false);
}
}
/**
* Show or hide the complete hand with all indicators.
*
* @param visible whether the hand and all of its indicators should be shown
*/
public void setVisible(boolean visible) {
hint.setVisible(visible);
startHint.setVisible(visible);
horizontal.setVisible(visible);
vertical.setVisible(visible);
for (Rectangle rect : this.fingerRects) {
rect.setVisible(visible);
}
}
/**
* Show or hide only the hand hint.
*
* @param visible whether the hand hint circle should be shown
*/
public void setHintVisible(boolean visible) {
this.hint.setVisible(visible);
}
}
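// Usage sketch (hypothetical wiring, not part of the original code): a Leap
// Motion listener could drive this view with made-up measurements like:
//
//   BrickView view = new BrickView(pane, 40, 200, 10, 10, 10, 30);
//   view.showHand(BrickView.Importance.PRIMARY, Position.HORIZONTAL,
//           hand.fingers().count(), false);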
| N0rp/Snabb | src/main/java/eu/dowsing/kolla/widget/brick/facade/BrickView.java | Java | apache-2.0 | 6,215 |
import os
import datetime
from jinja2 import Environment, PackageLoader, TemplateNotFound
from hotzenplotz.openstack.common import cfg
from hotzenplotz.openstack.common import log as logging
from hotzenplotz.openstack.common import utils
from hotzenplotz.common import exception
from hotzenplotz.api import validator
LOG = logging.getLogger(__name__)
class CronHandler(object):
"""Handler Cron Resource
"""
def __init__(self, **kwargs):
env = Environment(loader=PackageLoader('hotzenplotz.worker','templates'))
self.template = env.get_template('cron')
self.dir_path = None
# @utils.synchronized('haproxy')
def do_config(self, request):
try:
self._validate_request(request)
except exception.BadRequest as e:
LOG.warn('Bad request: %s' % e)
raise exception.CronConfigureError(explanation=str(e))
cmd = request['method']
msg = request['cron_resource']
if cmd == 'create_cron':
try:
self._create_cron(msg)
except exception.CronCreateError as e:
raise exception.CronConfigureError(explanation=str(e))
elif cmd == 'delete_cron':
try:
self._delete_cron(msg)
except exception.HaproxyDeleteError as e:
raise exception.CronConfigureError(explanation=str(e))
elif cmd == 'update_cron':
try:
self._update_cron(msg)
except exception.CronUpdateError as e:
raise exception.CronConfigureError(explanation=str(e))
def _create_cron(self,msg,syntax_check=False):
try:
output = self.template.render(cron_resource=msg)
except TemplateNotFound as e:
raise TemplateNotFound(str(e))
try:
if not self.dir_path:
self.dir_path = '/etc/puppet/modules/cron/'
cron_name = msg['title']
file_path = self.dir_path + cron_name
if not os.path.exists(file_path):
with open(file_path,'a') as f:
f.write(output)
except exception.CronCreateError as e:
raise exception.CronCreateError(explanation=str(e))
if syntax_check:
try:
self._test_syntax(file_path)
except exception.ProcessExecutionError as e:
raise exception.CronCreateError(explanation=str(e))
LOG.debug("Created the new cron successfully")
def _delete_cron(self, msg):
LOG.debug("Deleting cron for NAME:%s USER: %s PROJECT:%s" %
(msg['id'], msg['user_id'], msg['project_id']))
try:
new_cfg_path = self._create_lb_deleted_haproxy_cfg(msg)
except exception.HaproxyLBNotExists as e:
LOG.warn('%s', e)
return
##raise exception.HaproxyDeleteError(explanation=str(e))
try:
self._test_haproxy_config(new_cfg_path)
except exception.ProcessExecutionError as e:
raise exception.HaproxyDeleteError(explanation=str(e))
rc, backup_path = self._backup_original_cfg()
if rc != 0:
raise exception.HaproxyDeleteError(explanation=backup_path)
rc, strerror = self._replace_original_cfg_with_new(new_cfg_path)
if rc != 0:
raise exception.HaproxyDeleteError(explanation=strerror)
if self._reload_haproxy_cfg(backup_path) != 0:
e = 'Failed to reload haproxy'
raise exception.HaproxyDeleteError(explanation=str(e))
LOG.debug("Deleted the new load balancer successfully")
def _update_cron(self, msg):
LOG.debug("Updating the haproxy load "
"balancer for NAME:%s USER: %s PROJECT:%s" %
(msg['uuid'], msg['user_id'], msg['project_id']))
try:
lb_deleted_cfg_path = self._create_lb_deleted_haproxy_cfg(msg)
except exception.HaproxyLBNotExists as e:
LOG.warn('%s', e)
raise exception.HaproxyUpdateError(explanation=str(e))
try:
new_cfg_path = self._create_lb_haproxy_cfg(
msg, base_cfg_path=lb_deleted_cfg_path)
except exception.HaproxyCreateCfgError as e:
raise exception.HaproxyUpdateError(explanation=str(e))
try:
self._test_haproxy_config(new_cfg_path)
except exception.ProcessExecutionError as e:
raise exception.HaproxyUpdateError(explanation=str(e))
LOG.debug("Updated the new load balancer successfully")
def _validate_request(self, request):
validator.check_tcp_request(request)
def _get_lb_name(self, msg):
# TODO(wenjianhn): utf-8 support, base64
##return "%s_%s" % (msg['project_id'],
return "%s" % msg['uuid']
def _is_lb_in_use(self, lb_name,
base_cfg_path='/etc/haproxy/haproxy.cfg'):
with open(base_cfg_path) as cfg:
lines = cfg.readlines()
try:
in_use_lb_name = [line.split()[1] for line in lines
if line.startswith('listen')]
except IndexError:
LOG.error("No item was found after listen directive,"
"is the haproxy configuraion file valid?")
raise
return lb_name in in_use_lb_name
def _test_syntax(self, cfile_path):
LOG.info('Testing the new puppet configuration file')
cmd = "puppet parser validate %s" % cfile_path
try:
utils.execute(cmd)
except exception.ProcessExecutionError as e:
LOG.warn('Did not pass the configuration syntax test: %s', e)
raise
def _get_one_lb_info(self, line_all, line_index, line_total):
value = []
for i in range(line_index, line_total):
line = line_all[i]
if line.startswith('\t'):
value.append(line)
elif line.startswith('listen'):
return i, value
return line_total - 1, value
| NewpTone/hotzenplotz | hotzenplotz/worker/driver/cron.py | Python | apache-2.0 | 6,103 |
/*
* Copyright (c) 2016, WSO2 Inc. (http://wso2.com) All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.wso2.msf4j;
import io.netty.buffer.ByteBuf;
import java.io.Closeable;
import java.io.IOException;
import java.nio.ByteBuffer;
/**
* A responder for sending chunk-encoded response.
*/
public interface ChunkResponder extends Closeable {
/**
* Adds a chunk of data to the response. The content will be sent to the client asynchronously.
*
* @param chunk content to send
* @throws IOException if the connection is already closed
*/
void sendChunk(ByteBuffer chunk) throws IOException;
/**
* Adds a chunk of data to the response. The content will be sent to the client asynchronously.
*
* @param chunk content to send
* @throws IOException if this {@link ChunkResponder} already closed or the connection is closed
*/
void sendChunk(ByteBuf chunk) throws IOException;
/**
* Closes this responder which signals the end of the chunk response.
*/
@Override
void close() throws IOException;
}
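// Usage sketch (hypothetical caller, not part of MSF4J itself): stream two
// chunks and then terminate the chunked response.
//
//   ChunkResponder responder = ...; // obtained from a Response implementation
//   responder.sendChunk(ByteBuffer.wrap(firstChunkBytes));
//   responder.sendChunk(ByteBuffer.wrap(secondChunkBytes));
//   responder.close(); // signals the end of the chunk response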
| taniamahanama/product-msf4j | core/src/main/java/org/wso2/msf4j/ChunkResponder.java | Java | apache-2.0 | 1,625 |
# AUTOGENERATED FILE
FROM balenalib/coral-dev-alpine:3.14-run
# remove several traces of python
RUN apk del python*
# http://bugs.python.org/issue19846
# > At the moment, setting "LANG=C" on a Linux system *fundamentally breaks Python 3*, and that's not OK.
ENV LANG C.UTF-8
# install python dependencies
RUN apk add --no-cache ca-certificates libffi \
&& apk add --no-cache libssl1.0 || apk add --no-cache libssl1.1
# key 63C7CC90: public key "Simon McVittie <[email protected]>" imported
# key 3372DCFA: public key "Donald Stufft (dstufft) <[email protected]>" imported
RUN gpg --keyserver keyring.debian.org --recv-keys 4DE8FF2A63C7CC90 \
&& gpg --keyserver keyserver.ubuntu.com --recv-key 6E3CBCE93372DCFA \
&& gpg --keyserver keyserver.ubuntu.com --recv-keys 0x52a43a1e4b77b059
# point Python at a system-provided certificate database. Otherwise, we might hit CERTIFICATE_VERIFY_FAILED.
# https://www.python.org/dev/peps/pep-0476/#trust-database
ENV SSL_CERT_FILE /etc/ssl/certs/ca-certificates.crt
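# Quick sanity check (illustrative):
#   python3 -c "import ssl; print(ssl.get_default_verify_paths())"
# should report the cafile configured above.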
ENV PYTHON_VERSION 3.10.0
# if this is called "PIP_VERSION", pip explodes with "ValueError: invalid truth value '<VERSION>'"
ENV PYTHON_PIP_VERSION 21.2.4
ENV SETUPTOOLS_VERSION 58.0.0
RUN set -x \
&& buildDeps=' \
curl \
gnupg \
' \
&& apk add --no-cache --virtual .build-deps $buildDeps \
&& curl -SLO "http://resin-packages.s3.amazonaws.com/python/v$PYTHON_VERSION/Python-$PYTHON_VERSION.linux-alpine-aarch64-libffi3.3.tar.gz" \
&& echo "13ab188bd0214779de247bbde0919f4c19c91f78a34d26171b567b556a06c828 Python-$PYTHON_VERSION.linux-alpine-aarch64-libffi3.3.tar.gz" | sha256sum -c - \
&& tar -xzf "Python-$PYTHON_VERSION.linux-alpine-aarch64-libffi3.3.tar.gz" --strip-components=1 \
&& rm -rf "Python-$PYTHON_VERSION.linux-alpine-aarch64-libffi3.3.tar.gz" \
&& if [ ! -e /usr/local/bin/pip3 ]; then : \
&& curl -SLO "https://raw.githubusercontent.com/pypa/get-pip/430ba37776ae2ad89f794c7a43b90dc23bac334c/get-pip.py" \
&& echo "19dae841a150c86e2a09d475b5eb0602861f2a5b7761ec268049a662dbd2bd0c get-pip.py" | sha256sum -c - \
&& python3 get-pip.py \
&& rm get-pip.py \
; fi \
&& pip3 install --no-cache-dir --upgrade --force-reinstall pip=="$PYTHON_PIP_VERSION" setuptools=="$SETUPTOOLS_VERSION" \
&& find /usr/local \
\( -type d -a -name test -o -name tests \) \
-o \( -type f -a -name '*.pyc' -o -name '*.pyo' \) \
-exec rm -rf '{}' + \
&& cd / \
&& rm -rf /usr/src/python ~/.cache
# make some useful symlinks that are expected to exist
RUN cd /usr/local/bin \
&& ln -sf pip3 pip \
&& { [ -e easy_install ] || ln -s easy_install-* easy_install; } \
&& ln -sf idle3 idle \
&& ln -sf pydoc3 pydoc \
&& ln -sf python3 python \
&& ln -sf python3-config python-config
CMD ["echo","'No CMD command was set in Dockerfile! Details about CMD command could be found in Dockerfile Guide section in our Docs. Here's the link: https://balena.io/docs"]
RUN curl -SLO "https://raw.githubusercontent.com/balena-io-library/base-images/8accad6af708fca7271c5c65f18a86782e19f877/scripts/assets/tests/[email protected]" \
&& echo "Running test-stack@python" \
&& chmod +x [email protected] \
&& bash [email protected] \
&& rm -rf [email protected]
RUN [ ! -d /.balena/messages ] && mkdir -p /.balena/messages; echo $'Here are a few details about this Docker image (For more information please visit https://www.balena.io/docs/reference/base-images/base-images/): \nArchitecture: ARM v8 \nOS: Alpine Linux 3.14 \nVariant: run variant \nDefault variable(s): UDEV=off \nThe following software stack is preinstalled: \nPython v3.10.0, Pip v21.2.4, Setuptools v58.0.0 \nExtra features: \n- Easy way to install packages with `install_packages <package-name>` command \n- Run anywhere with cross-build feature (for ARM only) \n- Keep the container idling with `balena-idle` command \n- Show base image details with `balena-info` command' > /.balena/messages/image-info
RUN echo $'#!/bin/bash\nbalena-info\nbusybox ln -sf /bin/busybox /bin/sh\n/bin/sh "$@"' > /bin/sh-shim \
&& chmod +x /bin/sh-shim \
&& ln -f /bin/sh /bin/sh.real \
&& ln -f /bin/sh-shim /bin/sh | resin-io-library/base-images | balena-base-images/python/coral-dev/alpine/3.14/3.10.0/run/Dockerfile | Dockerfile | apache-2.0 | 4,127 |
# Pseudostellaria heterantha var. heterantha VARIETY
#### Status
ACCEPTED
#### According to
NUB Generator [autonym]
#### Published in
null
#### Original name
null
### Remarks
null | mdoering/backbone | life/Plantae/Magnoliophyta/Magnoliopsida/Caryophyllales/Caryophyllaceae/Pseudostellaria/Pseudostellaria heterantha/Pseudostellaria heterantha heterantha/README.md | Markdown | apache-2.0 | 184 |
# Harpalyce angustiflora Leon & Alain SPECIES
#### Status
SYNONYM
#### According to
The Catalogue of Life, 3rd January 2011
#### Published in
null
#### Original name
null
### Remarks
null | mdoering/backbone | life/Plantae/Magnoliophyta/Magnoliopsida/Fabales/Fabaceae/Harpalyce/Harpalyce ekmanii/ Syn. Harpalyce angustiflora/README.md | Markdown | apache-2.0 | 192 |
# Bradleia blumei Steud. SPECIES
#### Status
SYNONYM
#### According to
The Catalogue of Life, 3rd January 2011
#### Published in
null
#### Original name
null
### Remarks
null | mdoering/backbone | life/Plantae/Magnoliophyta/Magnoliopsida/Malpighiales/Phyllanthaceae/Glochidion/Glochidion molle/ Syn. Bradleia blumei/README.md | Markdown | apache-2.0 | 179 |
# Ptyssiglottis leptoneura Hallier f. SPECIES
#### Status
ACCEPTED
#### According to
International Plant Names Index
#### Published in
null
#### Original name
null
### Remarks
null | mdoering/backbone | life/Plantae/Magnoliophyta/Magnoliopsida/Lamiales/Acanthaceae/Ptyssiglottis/Ptyssiglottis leptoneura/README.md | Markdown | apache-2.0 | 185 |
# Fomes microporus (Sw.) Fr., 1885 SPECIES
#### Status
SYNONYM
#### According to
The Catalogue of Life, 3rd January 2011
#### Published in
Grevillea 14(no. 69): 20 (1885)
#### Original name
Boletus microporus Sw., 1806
### Remarks
null | mdoering/backbone | life/Fungi/Basidiomycota/Agaricomycetes/Polyporales/Meripilaceae/Rigidoporus/Rigidoporus microporus/ Syn. Fomes microporus/README.md | Markdown | apache-2.0 | 240 |
# Stellaria vernalis Raunk. SPECIES
#### Status
ACCEPTED
#### According to
International Plant Names Index
#### Published in
null
#### Original name
null
### Remarks
null | mdoering/backbone | life/Plantae/Magnoliophyta/Magnoliopsida/Caryophyllales/Caryophyllaceae/Stellaria/Stellaria vernalis/README.md | Markdown | apache-2.0 | 175 |
# Neofuscelia atroviridis (Essl.) Essl. SPECIES
#### Status
SYNONYM
#### According to
The Catalogue of Life, 3rd January 2011
#### Published in
null
#### Original name
null
### Remarks
null | mdoering/backbone | life/Fungi/Ascomycota/Lecanoromycetes/Lecanorales/Parmeliaceae/Xanthoparmelia/Xanthoparmelia atroviridis/ Syn. Neofuscelia atroviridis/README.md | Markdown | apache-2.0 | 194 |
# Mougeotia genuflexa (Dillwyn) C. Agardh SPECIES
#### Status
ACCEPTED
#### According to
The Catalogue of Life, 3rd January 2011
#### Published in
null
#### Original name
null
### Remarks
null | mdoering/backbone | life/Plantae/Chlorophyta/Zygnematophyceae/Zygnematales/Zygnemataceae/Mougeotia/Mougeotia genuflexa/README.md | Markdown | apache-2.0 | 197 |
# Forsellesia pungens (Brandegee) A. Heller SPECIES
#### Status
SYNONYM
#### According to
The Catalogue of Life, 3rd January 2011
#### Published in
null
#### Original name
null
### Remarks
null | mdoering/backbone | life/Plantae/Magnoliophyta/Magnoliopsida/Crossosomatales/Crossosomataceae/Glossopetalon/Glossopetalon pungens/ Syn. Forsellesia pungens/README.md | Markdown | apache-2.0 | 198 |
/*
* Copyright 2010-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License").
* You may not use this file except in compliance with the License.
* A copy of the License is located at
*
* http://aws.amazon.com/apache2.0
*
* or in the "license" file accompanying this file. This file is distributed
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
* express or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
#include <aws/sagemaker/model/ListProcessingJobsResult.h>
#include <aws/core/utils/json/JsonSerializer.h>
#include <aws/core/AmazonWebServiceResult.h>
#include <aws/core/utils/StringUtils.h>
#include <aws/core/utils/UnreferencedParam.h>
#include <utility>
using namespace Aws::SageMaker::Model;
using namespace Aws::Utils::Json;
using namespace Aws::Utils;
using namespace Aws;
ListProcessingJobsResult::ListProcessingJobsResult()
{
}
ListProcessingJobsResult::ListProcessingJobsResult(const Aws::AmazonWebServiceResult<JsonValue>& result)
{
*this = result;
}
ListProcessingJobsResult& ListProcessingJobsResult::operator =(const Aws::AmazonWebServiceResult<JsonValue>& result)
{
JsonView jsonValue = result.GetPayload().View();
if(jsonValue.ValueExists("ProcessingJobSummaries"))
{
Array<JsonView> processingJobSummariesJsonList = jsonValue.GetArray("ProcessingJobSummaries");
for(unsigned processingJobSummariesIndex = 0; processingJobSummariesIndex < processingJobSummariesJsonList.GetLength(); ++processingJobSummariesIndex)
{
m_processingJobSummaries.push_back(processingJobSummariesJsonList[processingJobSummariesIndex].AsObject());
}
}
if(jsonValue.ValueExists("NextToken"))
{
m_nextToken = jsonValue.GetString("NextToken");
}
return *this;
}
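// Example response payload (illustrative), matching the keys parsed above:
//   { "ProcessingJobSummaries": [ { ... } ], "NextToken": "opaque-token" }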
| cedral/aws-sdk-cpp | aws-cpp-sdk-sagemaker/source/model/ListProcessingJobsResult.cpp | C++ | apache-2.0 | 1,879 |
#region License Header
/*
* QUANTLER.COM - Quant Fund Development Platform
* Quantler Core Trading Engine. Copyright 2018 Quantler B.V.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#endregion License Header
using MessagePack;
using System;
using Quantler.Securities;
namespace Quantler.Data.Corporate
{
/// <summary>
/// Dividend amount
/// </summary>
[MessagePackObject]
public class Dividend : DataPointImpl
{
#region Public Constructors
/// <summary>
/// Initializes a new instance of the <see cref="Dividend"/> class.
/// </summary>
public Dividend() =>
DataType = DataType.Dividend;
/// <summary>
/// Initializes a new instance of the <see cref="Dividend"/> class.
/// </summary>
/// <param name="ticker">The ticker.</param>
/// <param name="date">The date.</param>
/// <param name="amount">The amount.</param>
public Dividend(TickerSymbol ticker, DateTime date, decimal amount)
: this()
{
Ticker = ticker;
Occured = date;
TimeZone = TimeZone.Utc;
Amount = amount;
}
#endregion Public Constructors
#region Public Properties
/// <summary>
/// Amount distribution
/// </summary>
[Key(6)]
public decimal Amount
{
get => Price;
set => Price = Math.Round(value, 2);
}
#endregion Public Properties
}
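// Usage sketch (hypothetical ticker): record a 0.42 dividend occurring now.
//   var dividend = new Dividend(ticker, DateTime.UtcNow, 0.42m);
//   decimal amount = dividend.Amount; // stored rounded to 2 decimal places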
} | Quantler/Core | Quantler/Data/Corporate/Dividend.cs | C# | apache-2.0 | 2,025 |
/*
* Copyright 2016-present Open Networking Laboratory
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Implementation of YANG node bgpVrfAf's children nodes.
*/
package org.onosproject.yang.gen.v1.ne.bgpcomm.rev20141225.nebgpcomm.bgpcomm.bgpvrfs.bgpvrf.bgpvrfafs.bgpvrfaf; | mengmoya/onos | apps/l3vpn/nel3vpn/nemgr/src/main/java/org/onosproject/yang/gen/v1/ne/bgpcomm/rev20141225/nebgpcomm/bgpcomm/bgpvrfs/bgpvrf/bgpvrfafs/bgpvrfaf/package-info.java | Java | apache-2.0 | 796 |
/// <reference path="./fabricPlugin.ts"/>
/// <reference path="./profileHelpers.ts"/>
/// <reference path="./containerHelpers.ts"/>
/// <reference path="../../helpers/js/storageHelpers.ts"/>
/// <reference path="../../helpers/js/controllerHelpers.ts"/>
/// <reference path="../../helpers/js/selectionHelpers.ts"/>
/// <reference path="../../helpers/js/filterHelpers.ts"/>
module Fabric {
export var ContainerViewController = _module.controller("Fabric.ContainerViewController", ["$scope", "jolokia", "$location", "localStorage", "$route", "workspace", "marked", "ProfileCart", "$dialog", ($scope, jolokia, $location, localStorage, $route, workspace:Workspace, marked, ProfileCart, $dialog) => {
$scope.name = ContainerViewController.name;
$scope.containers = <Array<Container>>[];
$scope.selectedContainers = <Array<Container>>[];
$scope.groupBy = 'none';
$scope.filter = '';
$scope.cartItems = [];
$scope.versionIdFilter = '';
$scope.profileIdFilter = '';
$scope.locationIdFilter = '';
$scope.hasCounts = true;
$scope.toString = Core.toString;
$scope.filterContainersText = 'Filter Containers...';
$scope.filterProfilesText = 'Filter Profiles...';
$scope.filterLocationsText = 'Filter Locations...';
$scope.filterBoxText = $scope.filterContainersText;
$scope.selectedTags = [];
$scope.createLocationDialog = ContainerHelpers.getCreateLocationDialog($scope, $dialog);
var containerFields = ['id', 'profileIds', 'profiles', 'versionId', 'location', 'alive', 'type', 'ensembleServer', 'provisionResult', 'root', 'jolokiaUrl', 'jmxDomains', 'metadata', 'parentId'];
var profileFields = ['id', 'hidden', 'version', 'summaryMarkdown', 'iconURL', 'tags'];
Fabric.initScope($scope, $location, jolokia, workspace);
SelectionHelpers.decorate($scope);
// when viewing profile boxes in container view, disable checkboxes
$scope.viewOnly = true;
StorageHelpers.bindModelToLocalStorage({
$scope: $scope,
$location: $location,
localStorage: localStorage,
modelName: 'groupBy',
paramName: 'groupBy',
initialValue: $scope.groupBy
});
StorageHelpers.bindModelToLocalStorage({
$scope: $scope,
$location: $location,
localStorage: localStorage,
modelName: 'versionIdFilter',
paramName: 'versionIdFilter',
initialValue: $scope.versionIdFilter
});
StorageHelpers.bindModelToLocalStorage({
$scope: $scope,
$location: $location,
localStorage: localStorage,
modelName: 'profileIdFilter',
paramName: 'profileIdFilter',
initialValue: $scope.profileIdFilter
});
StorageHelpers.bindModelToLocalStorage({
$scope: $scope,
$location: $location,
localStorage: localStorage,
modelName: 'locationIdFilter',
paramName: 'locationIdFilter',
initialValue: $scope.locationIdFilter
});
$scope.groupByClass = ControllerHelpers.createClassSelector({
'profileIds': 'btn-primary',
'location': 'btn-primary',
'none': 'btn-primary'
});
$scope.$watch('containers', (newValue, oldValue) => {
if (newValue !== oldValue) {
$scope.selectedContainers = $scope.containers.filter((container) => { return container['selected']; });
}
}, true);
$scope.maybeShowLocation = () => {
return ($scope.groupBy === 'location' || $scope.groupBy === 'none') && $scope.selectedContainers.length > 0;
}
$scope.showContainersFor = (thing) => {
if (angular.isString(thing)) {
$scope.locationIdFilter = thing;
} else {
$scope.profileIdFilter = thing.id;
$scope.versionIdFilter = thing.version;
}
$scope.groupBy = 'none';
}
$scope.filterLocation = (locationId) => {
return FilterHelpers.searchObject(locationId, $scope.filter);
}
$scope.filterProfiles = (profile) => {
return FilterHelpers.searchObject(profile.id, $scope.filter);
}
$scope.filterContainers = (container) => {
if (!Core.isBlank($scope.versionIdFilter) && container.versionId !== $scope.versionIdFilter) {
return false;
}
if (!Core.isBlank($scope.profileIdFilter) && !container.profileIds.any($scope.profileIdFilter)) {
return false;
}
if (!Core.isBlank($scope.locationIdFilter) && container.location !== $scope.locationIdFilter) {
return false;
}
return FilterHelpers.searchObject(container.id, $scope.filter);
}
$scope.filterContainer = $scope.filterContainers;
$scope.viewProfile = (profile:Profile) => {
Fabric.gotoProfile(workspace, jolokia, workspace.localStorage, $location, profile.version, profile.id);
};
function maybeAdd(group: Array<any>, thing:any, index:string) {
if (angular.isArray(thing)) {
thing.forEach((i) => { maybeAdd(group, i, index); });
} else {
if (!group.any((item) => { return thing[index] === item[index] })) {
group.add(thing);
}
}
}
function groupByVersions(containers:Array<Container>) {
var answer = {};
containers.forEach((container) => {
var versionId = container.versionId;
var version = answer[versionId] || { containers: <Array<Container>>[], profiles: <Array<Profile>>[] };
maybeAdd(version.containers, container, 'id');
maybeAdd(version.profiles, container.profiles, 'id');
answer[versionId] = version;
});
return answer;
}
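// Resulting shape (illustrative):
//   { '1.0': { containers: [c1, c2], profiles: [p1, p2] }, ... }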
function groupByLocation(containers:Array<Container>) {
var answer = {};
containers.forEach((container) => {
var location = container.location;
var loc = answer[location] || { containers: Array<Container>() };
maybeAdd(loc.containers, container, 'id');
answer[location] = loc;
});
return answer;
}
Fabric.loadRestApi(jolokia, workspace, undefined, (response) => {
$scope.restApiUrl = UrlHelpers.maybeProxy(Core.injector.get('jolokiaUrl'), response.value);
log.debug("Scope rest API: ", $scope.restApiUrl);
Core.registerForChanges(jolokia, $scope, {
type: 'exec',
mbean: Fabric.managerMBean,
operation: 'containers(java.util.List, java.util.List)',
arguments:[containerFields, profileFields]
}, (response) => {
var containers = response.value;
SelectionHelpers.sync($scope.selectedContainers, containers, 'id');
// massage the returned data a bit first
containers.forEach((container) => {
if (Core.isBlank(container.location)) {
container.location = ContainerHelpers.NO_LOCATION;
}
container.profiles = container.profiles.filter((p) => { return !p.hidden });
container.icon = Fabric.getTypeIcon(container);
container.services = Fabric.getServiceList(container);
});
var versions = groupByVersions(containers);
angular.forEach(versions, (version, versionId) => {
version.profiles.forEach((profile) => {
var containers = version.containers.filter((c) => { return c.profileIds.some(profile.id); });
profile.aliveCount = containers.count((c) => { return c.alive; });
profile.deadCount = containers.length - profile.aliveCount;
profile.summary = profile.summaryMarkdown ? marked(profile.summaryMarkdown) : '';
profile.iconURL = Fabric.toIconURL($scope, profile.iconURL);
profile.tags = ProfileHelpers.getTags(profile);
});
});
var locations = groupByLocation(containers);
var locationIds = ContainerHelpers.extractLocations(containers);
$scope.locationMenu = ContainerHelpers.buildLocationMenu($scope, jolokia, locationIds);
// grouped by location
$scope.locations = locations;
// grouped by version/profile
$scope.versions = versions;
// Sort by id with child containers grouped under parents
var sortedContainers = containers.sortBy('id');
var rootContainers = sortedContainers.exclude((c) => { return !c.root; });
var childContainers = sortedContainers.exclude((c) => { return c.root; });
if (childContainers.length > 0) {
var tmp = [];
rootContainers.each((c) => {
tmp.add(c);
var children = childContainers.exclude((child) => { return child.parentId !== c.id });
tmp.add(children);
});
containers = tmp;
}
$scope.containers = containers;
Core.$apply($scope);
});
Core.registerForChanges(jolokia, $scope, {
type: 'read',
mbean: Fabric.clusterManagerMBean,
attribute: 'EnsembleContainers'
}, (response) => {
$scope.ensembleContainerIds = response.value;
Core.$apply($scope);
});
});
}]);
}
| mposolda/hawtio | hawtio-web/src/main/webapp/app/fabric/js/containerView.ts | TypeScript | apache-2.0 | 8,985 |
/**
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hbase.master;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.NavigableMap;
import java.util.Set;
import java.util.TreeMap;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentSkipListSet;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.Chore;
import org.apache.hadoop.hbase.HBaseIOException;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.NotServingRegionException;
import org.apache.hadoop.hbase.RegionTransition;
import org.apache.hadoop.hbase.Server;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.Stoppable;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.TableNotFoundException;
import org.apache.hadoop.hbase.catalog.CatalogTracker;
import org.apache.hadoop.hbase.catalog.MetaReader;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.exceptions.DeserializationException;
import org.apache.hadoop.hbase.executor.EventHandler;
import org.apache.hadoop.hbase.executor.EventType;
import org.apache.hadoop.hbase.executor.ExecutorService;
import org.apache.hadoop.hbase.ipc.RpcClient;
import org.apache.hadoop.hbase.ipc.RpcClient.FailedServerException;
import org.apache.hadoop.hbase.ipc.ServerNotRunningYetException;
import org.apache.hadoop.hbase.master.RegionState.State;
import org.apache.hadoop.hbase.master.balancer.FavoredNodeAssignmentHelper;
import org.apache.hadoop.hbase.master.balancer.FavoredNodeLoadBalancer;
import org.apache.hadoop.hbase.master.handler.ClosedRegionHandler;
import org.apache.hadoop.hbase.master.handler.DisableTableHandler;
import org.apache.hadoop.hbase.master.handler.EnableTableHandler;
import org.apache.hadoop.hbase.master.handler.OpenedRegionHandler;
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition;
import org.apache.hadoop.hbase.protobuf.generated.RegionServerStatusProtos.RegionStateTransition.TransitionCode;
import org.apache.hadoop.hbase.regionserver.RegionAlreadyInTransitionException;
import org.apache.hadoop.hbase.regionserver.RegionMergeTransaction;
import org.apache.hadoop.hbase.regionserver.RegionOpeningState;
import org.apache.hadoop.hbase.regionserver.RegionServerStoppedException;
import org.apache.hadoop.hbase.regionserver.SplitTransaction;
import org.apache.hadoop.hbase.regionserver.wal.HLog;
import org.apache.hadoop.hbase.regionserver.wal.HLogUtil;
import org.apache.hadoop.hbase.util.ConfigUtil;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
import org.apache.hadoop.hbase.util.FSUtils;
import org.apache.hadoop.hbase.util.KeyLocker;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.PairOfSameType;
import org.apache.hadoop.hbase.util.Threads;
import org.apache.hadoop.hbase.util.Triple;
import org.apache.hadoop.hbase.zookeeper.MetaRegionTracker;
import org.apache.hadoop.hbase.zookeeper.ZKAssign;
import org.apache.hadoop.hbase.zookeeper.ZKTable;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperListener;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.zookeeper.AsyncCallback;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.KeeperException.NoNodeException;
import org.apache.zookeeper.KeeperException.NodeExistsException;
import org.apache.zookeeper.data.Stat;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.collect.LinkedHashMultimap;
/**
* Manages and performs region assignment.
* <p>
* Monitors ZooKeeper for events related to regions in transition.
* <p>
* Handles existing regions in transition during master failover.
*/
@InterfaceAudience.Private
public class AssignmentManager extends ZooKeeperListener {
private static final Log LOG = LogFactory.getLog(AssignmentManager.class);
public static final ServerName HBCK_CODE_SERVERNAME = ServerName.valueOf(HConstants.HBCK_CODE_NAME,
-1, -1L);
public static final String ASSIGNMENT_TIMEOUT = "hbase.master.assignment.timeoutmonitor.timeout";
public static final int DEFAULT_ASSIGNMENT_TIMEOUT_DEFAULT = 600000;
public static final String ASSIGNMENT_TIMEOUT_MANAGEMENT = "hbase.assignment.timeout.management";
public static final boolean DEFAULT_ASSIGNMENT_TIMEOUT_MANAGEMENT = false;
public static final String ALREADY_IN_TRANSITION_WAITTIME
= "hbase.assignment.already.intransition.waittime";
public static final int DEFAULT_ALREADY_IN_TRANSITION_WAITTIME = 60000; // 1 minute
protected final Server server;
private ServerManager serverManager;
private boolean shouldAssignRegionsWithFavoredNodes;
private CatalogTracker catalogTracker;
protected final TimeoutMonitor timeoutMonitor;
private final TimerUpdater timerUpdater;
private LoadBalancer balancer;
private final MetricsAssignmentManager metricsAssignmentManager;
private final TableLockManager tableLockManager;
private AtomicInteger numRegionsOpened = new AtomicInteger(0);
final private KeyLocker<String> locker = new KeyLocker<String>();
/**
* Map of regions to reopen after the schema of a table is changed. Key -
* encoded region name, value - HRegionInfo
*/
private final Map <String, HRegionInfo> regionsToReopen;
/*
* Maximum times we recurse an assignment/unassignment.
* See below in {@link #assign()} and {@link #unassign()}.
*/
private final int maximumAttempts;
/**
* Map of two merging regions from the region to be created.
*/
private final Map<String, PairOfSameType<HRegionInfo>> mergingRegions
= new HashMap<String, PairOfSameType<HRegionInfo>>();
/**
* The sleep time for which the assignment will wait before retrying in case of hbase:meta assignment
* failure due to lack of availability of region plan
*/
private final long sleepTimeBeforeRetryingMetaAssignment;
/** Plans for region movement. Key is the encoded version of a region name*/
// TODO: When do plans get cleaned out? Ever? In server open and in server
// shutdown processing -- St.Ack
// All access to this Map must be synchronized.
final NavigableMap<String, RegionPlan> regionPlans =
new TreeMap<String, RegionPlan>();
private final ZKTable zkTable;
/**
* Contains the server which need to update timer, these servers will be
* handled by {@link TimerUpdater}
*/
private final ConcurrentSkipListSet<ServerName> serversInUpdatingTimer;
private final ExecutorService executorService;
// For unit tests, keep track of calls to ClosedRegionHandler
private Map<HRegionInfo, AtomicBoolean> closedRegionHandlerCalled = null;
// For unit tests, keep track of calls to OpenedRegionHandler
private Map<HRegionInfo, AtomicBoolean> openedRegionHandlerCalled = null;
//Thread pool executor service for timeout monitor
private java.util.concurrent.ExecutorService threadPoolExecutorService;
// A bunch of ZK events workers. Each is a single thread executor service
private final java.util.concurrent.ExecutorService zkEventWorkers;
private List<EventType> ignoreStatesRSOffline = Arrays.asList(
EventType.RS_ZK_REGION_FAILED_OPEN, EventType.RS_ZK_REGION_CLOSED);
private final RegionStates regionStates;
// The threshold to use bulk assigning. Using bulk assignment
// only if assigning at least this many regions to at least this
// many servers. If assigning fewer regions to fewer servers,
// bulk assigning may be not as efficient.
private final int bulkAssignThresholdRegions;
private final int bulkAssignThresholdServers;
// Should bulk assignment wait till all regions are assigned,
// or it is timed out? This is useful to measure bulk assignment
// performance, but not needed in most use cases.
private final boolean bulkAssignWaitTillAllAssigned;
/**
* Indicator that AssignmentManager has recovered the region states so
* that ServerShutdownHandler can be fully enabled and re-assign regions
* of dead servers. So that when re-assignment happens, AssignmentManager
* has proper region states.
*
* Protected to ease testing.
*/
protected final AtomicBoolean failoverCleanupDone = new AtomicBoolean(false);
/** Is the TimeOutManagement activated **/
private final boolean tomActivated;
/**
* A map to track the count a region fails to open in a row.
* So that we don't try to open a region forever if the failure is
* unrecoverable. We don't put this information in region states
* because we don't expect this to happen frequently; we don't
* want to copy this information over during each state transition either.
*/
private final ConcurrentHashMap<String, AtomicInteger>
failedOpenTracker = new ConcurrentHashMap<String, AtomicInteger>();
// A flag to indicate if we are using ZK for region assignment
private final boolean useZKForAssignment;
// In case not using ZK for region assignment, region states
// are persisted in meta with a state store
private final RegionStateStore regionStateStore;
/**
* For testing only! Set to true to skip handling of split.
*/
@edu.umd.cs.findbugs.annotations.SuppressWarnings(value="MS_SHOULD_BE_FINAL")
public static boolean TEST_SKIP_SPLIT_HANDLING = false;
/** Listeners that are called on assignment events. */
private List<AssignmentListener> listeners = new CopyOnWriteArrayList<AssignmentListener>();
/**
* Constructs a new assignment manager.
*
* @param server
* @param serverManager
* @param catalogTracker
* @param service
* @throws KeeperException
* @throws IOException
*/
public AssignmentManager(Server server, ServerManager serverManager,
CatalogTracker catalogTracker, final LoadBalancer balancer,
final ExecutorService service, MetricsMaster metricsMaster,
final TableLockManager tableLockManager) throws KeeperException, IOException {
super(server.getZooKeeper());
this.server = server;
this.serverManager = serverManager;
this.catalogTracker = catalogTracker;
this.executorService = service;
this.regionStateStore = new RegionStateStore(server);
this.regionsToReopen = Collections.synchronizedMap
(new HashMap<String, HRegionInfo> ());
Configuration conf = server.getConfiguration();
// Only read favored nodes if using the favored nodes load balancer.
this.shouldAssignRegionsWithFavoredNodes = conf.getClass(
HConstants.HBASE_MASTER_LOADBALANCER_CLASS, Object.class).equals(
FavoredNodeLoadBalancer.class);
this.tomActivated = conf.getBoolean(
ASSIGNMENT_TIMEOUT_MANAGEMENT, DEFAULT_ASSIGNMENT_TIMEOUT_MANAGEMENT);
if (tomActivated){
this.serversInUpdatingTimer = new ConcurrentSkipListSet<ServerName>();
this.timeoutMonitor = new TimeoutMonitor(
conf.getInt("hbase.master.assignment.timeoutmonitor.period", 30000),
server, serverManager,
conf.getInt(ASSIGNMENT_TIMEOUT, DEFAULT_ASSIGNMENT_TIMEOUT_DEFAULT));
this.timerUpdater = new TimerUpdater(conf.getInt(
"hbase.master.assignment.timerupdater.period", 10000), server);
Threads.setDaemonThreadRunning(timerUpdater.getThread(),
server.getServerName() + ".timerUpdater");
} else {
this.serversInUpdatingTimer = null;
this.timeoutMonitor = null;
this.timerUpdater = null;
}
this.zkTable = new ZKTable(this.watcher);
// This is the max attempts, not retries, so it should be at least 1.
this.maximumAttempts = Math.max(1,
this.server.getConfiguration().getInt("hbase.assignment.maximum.attempts", 10));
this.sleepTimeBeforeRetryingMetaAssignment = this.server.getConfiguration().getLong(
"hbase.meta.assignment.retry.sleeptime", 1000l);
this.balancer = balancer;
int maxThreads = conf.getInt("hbase.assignment.threads.max", 30);
this.threadPoolExecutorService = Threads.getBoundedCachedThreadPool(
maxThreads, 60L, TimeUnit.SECONDS, Threads.newDaemonThreadFactory("AM."));
this.regionStates = new RegionStates(server, serverManager, regionStateStore);
this.bulkAssignWaitTillAllAssigned =
conf.getBoolean("hbase.bulk.assignment.waittillallassigned", false);
this.bulkAssignThresholdRegions = conf.getInt("hbase.bulk.assignment.threshold.regions", 7);
this.bulkAssignThresholdServers = conf.getInt("hbase.bulk.assignment.threshold.servers", 3);
int workers = conf.getInt("hbase.assignment.zkevent.workers", 20);
ThreadFactory threadFactory = Threads.newDaemonThreadFactory("AM.ZK.Worker");
zkEventWorkers = Threads.getBoundedCachedThreadPool(workers, 60L,
TimeUnit.SECONDS, threadFactory);
this.tableLockManager = tableLockManager;
this.metricsAssignmentManager = new MetricsAssignmentManager();
useZKForAssignment = ConfigUtil.useZKForAssignment(conf);
}
void startTimeOutMonitor() {
if (tomActivated) {
Threads.setDaemonThreadRunning(timeoutMonitor.getThread(), server.getServerName()
+ ".timeoutMonitor");
}
}
/**
* Add the listener to the notification list.
* @param listener The AssignmentListener to register
*/
public void registerListener(final AssignmentListener listener) {
this.listeners.add(listener);
}
/**
* Remove the listener from the notification list.
* @param listener The AssignmentListener to unregister
*/
public boolean unregisterListener(final AssignmentListener listener) {
return this.listeners.remove(listener);
}
/**
* @return Instance of ZKTable.
*/
public ZKTable getZKTable() {
// These are 'expensive' to make, involving a trip to the zk ensemble,
// so allow sharing.
return this.zkTable;
}
/**
* This SHOULD not be public. It is public now
* because of some unit tests.
*
* TODO: make it package private and keep RegionStates in the master package
*/
public RegionStates getRegionStates() {
return regionStates;
}
/**
* Used in some tests to mock up region state in meta
*/
@VisibleForTesting
RegionStateStore getRegionStateStore() {
return regionStateStore;
}
public RegionPlan getRegionReopenPlan(HRegionInfo hri) {
return new RegionPlan(hri, null, regionStates.getRegionServerOfRegion(hri));
}
/**
* Add a regionPlan for the specified region.
* @param encodedName
* @param plan
*/
public void addPlan(String encodedName, RegionPlan plan) {
synchronized (regionPlans) {
regionPlans.put(encodedName, plan);
}
}
/**
* Add a map of region plans.
*/
public void addPlans(Map<String, RegionPlan> plans) {
synchronized (regionPlans) {
regionPlans.putAll(plans);
}
}
/**
* Set the list of regions that will be reopened
* because of an update in table schema
*
* @param regions
* list of regions that should be tracked for reopen
*/
public void setRegionsToReopen(List <HRegionInfo> regions) {
for(HRegionInfo hri : regions) {
regionsToReopen.put(hri.getEncodedName(), hri);
}
}
/**
* Used by the client to identify if all regions have the schema updates
*
* @param tableName
* @return Pair indicating the status of the alter command
* @throws IOException
*/
public Pair<Integer, Integer> getReopenStatus(TableName tableName)
throws IOException {
List <HRegionInfo> hris =
MetaReader.getTableRegions(this.server.getCatalogTracker(), tableName, true);
Integer pending = 0;
for (HRegionInfo hri : hris) {
String name = hri.getEncodedName();
// no lock concurrent access ok: sequential consistency respected.
if (regionsToReopen.containsKey(name)
|| regionStates.isRegionInTransition(name)) {
pending++;
}
}
return new Pair<Integer, Integer>(pending, hris.size());
}
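// Example (sketch): a caller can report alter progress as
//   Pair<Integer, Integer> status = am.getReopenStatus(tableName);
//   LOG.info(status.getFirst() + " of " + status.getSecond() + " regions pending reopen");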
/**
* Used by ServerShutdownHandler to make sure AssignmentManager has completed
* the failover cleanup before re-assigning regions of dead servers. So that
* when re-assignment happens, AssignmentManager has proper region states.
*/
public boolean isFailoverCleanupDone() {
return failoverCleanupDone.get();
}
/**
* To avoid racing with AM, external entities may need to lock a region,
* for example, when SSH checks what regions to skip re-assigning.
*/
public Lock acquireRegionLock(final String encodedName) {
return locker.acquireLock(encodedName);
}
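// Usage sketch, mirroring how this class itself uses the locker:
//   Lock lock = am.acquireRegionLock(encodedName);
//   try {
//     // ... work on the region ...
//   } finally {
//     lock.unlock();
//   }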
/**
* Now, failover cleanup is completed. Notify server manager to
* process queued up dead servers processing, if any.
*/
void failoverCleanupDone() {
failoverCleanupDone.set(true);
serverManager.processQueuedDeadServers();
}
/**
* Called on startup.
* Figures whether this is a fresh cluster start or we are joining an extant running cluster.
* @throws IOException
* @throws KeeperException
* @throws InterruptedException
*/
void joinCluster() throws IOException,
KeeperException, InterruptedException {
long startTime = System.currentTimeMillis();
// Concurrency note: below, the accesses on regionsInTransition are
// outside of a synchronization block, whereas usually all accesses to RIT are
// synchronized. The presumption is that in this case it is safe since this
// method is run by a single thread on startup.
// TODO: Regions that have a null location and are not in regionsInTransitions
// need to be handled.
// Scan hbase:meta to build list of existing regions, servers, and assignment
// Returns servers who have not checked in (assumed dead) and their regions
Map<ServerName, List<HRegionInfo>> deadServers = rebuildUserRegions();
// This method will assign all user regions if a clean server startup or
// it will reconstruct master state and cleanup any leftovers from
// previous master process.
boolean failover = processDeadServersAndRegionsInTransition(deadServers);
if (!useZKForAssignment) {
// Not using ZK for assignment any more; remove the ZNode
ZKUtil.deleteNodeRecursively(watcher, watcher.assignmentZNode);
}
recoverTableInDisablingState();
recoverTableInEnablingState();
LOG.info("Joined the cluster in " + (System.currentTimeMillis()
- startTime) + "ms, failover=" + failover);
}
/**
* Process all regions that are in transition in zookeeper and also
* processes the list of dead servers by scanning the META.
* Used by a master joining a cluster. If we figure this is a clean cluster
* startup, will assign all user regions.
* @param deadServers
* Map of dead servers and their regions. Can be null.
* @throws KeeperException
* @throws IOException
* @throws InterruptedException
*/
boolean processDeadServersAndRegionsInTransition(
final Map<ServerName, List<HRegionInfo>> deadServers)
throws KeeperException, IOException, InterruptedException {
List<String> nodes = ZKUtil.listChildrenNoWatch(watcher,
watcher.assignmentZNode);
if (nodes == null && useZKForAssignment) {
String errorMessage = "Failed to get the children from ZK";
server.abort(errorMessage, new IOException(errorMessage));
return true; // Doesn't matter in this case
}
boolean failover = !serverManager.getDeadServers().isEmpty();
if (failover) {
// This may not be a failover actually, especially if meta is on this master.
if (LOG.isDebugEnabled()) {
LOG.debug("Found dead servers out on cluster " + serverManager.getDeadServers());
}
} else {
// If any one region except meta is assigned, it's a failover.
Set<ServerName> onlineServers = serverManager.getOnlineServers().keySet();
for (Map.Entry<HRegionInfo, ServerName> en : regionStates.getRegionAssignments().entrySet()) {
HRegionInfo hri = en.getKey();
if (!hri.isMetaTable() && onlineServers.contains(en.getValue())) {
LOG.debug("Found " + hri + " out on cluster");
failover = true;
break;
}
}
}
if (!failover && nodes != null) {
// If any one region except meta is in transition, it's a failover.
for (String encodedName : nodes) {
RegionState regionState = regionStates.getRegionState(encodedName);
if (regionState != null && !regionState.getRegion().isMetaRegion()) {
LOG.debug("Found " + regionState + " in RITs");
failover = true;
break;
}
}
}
if (!failover && !useZKForAssignment) {
// If any region except meta is in transition on a live server, it's a failover.
Map<String, RegionState> regionsInTransition = regionStates.getRegionsInTransition();
if (!regionsInTransition.isEmpty()) {
Set<ServerName> onlineServers = serverManager.getOnlineServers().keySet();
for (RegionState regionState : regionsInTransition.values()) {
if (!regionState.getRegion().isMetaRegion()
&& onlineServers.contains(regionState.getServerName())) {
LOG.debug("Found " + regionState + " in RITs");
failover = true;
break;
}
}
}
}
if (!failover) {
// If we get here, we have a full cluster restart. It is a failover only
// if some HLogs are not split yet. For meta HLogs, they should have
// been split already, if any. We can walk through those queued dead servers;
// if they don't have any HLogs, this restart can be considered a clean one
Set<ServerName> queuedDeadServers = serverManager.getRequeuedDeadServers().keySet();
if (!queuedDeadServers.isEmpty()) {
Configuration conf = server.getConfiguration();
Path rootdir = FSUtils.getRootDir(conf);
FileSystem fs = rootdir.getFileSystem(conf);
for (ServerName serverName : queuedDeadServers) {
Path logDir = new Path(rootdir, HLogUtil.getHLogDirectoryName(serverName.toString()));
Path splitDir = logDir.suffix(HLog.SPLITTING_EXT);
if (fs.exists(logDir) || fs.exists(splitDir)) {
LOG.debug("Found queued dead server " + serverName);
failover = true;
break;
}
}
if (!failover) {
// We figured that it's not a failover, so no need to
// work on these re-queued dead servers any more.
LOG.info("AM figured that it's not a failover and cleaned up " + queuedDeadServers.size()
+ " queued dead servers");
serverManager.removeRequeuedDeadServers();
}
}
}
Set<TableName> disabledOrDisablingOrEnabling = null;
if (!failover) {
disabledOrDisablingOrEnabling = ZKTable.getDisabledOrDisablingTables(watcher);
disabledOrDisablingOrEnabling.addAll(ZKTable.getEnablingTables(watcher));
// Clean re/start, mark all user regions closed before reassignment
// TODO -Hbase-11319
regionStates.closeAllUserRegions(disabledOrDisablingOrEnabling);
}
// Now region states are restored
regionStateStore.start();
// If we found user regions out on cluster, its a failover.
if (failover) {
LOG.info("Found regions out on cluster or in RIT; presuming failover");
// Process list of dead servers and regions in RIT.
// See HBASE-4580 for more information.
processDeadServersAndRecoverLostRegions(deadServers);
}
if (!failover && useZKForAssignment) {
// Cleanup any existing ZK nodes and start watching
ZKAssign.deleteAllNodes(watcher);
ZKUtil.listChildrenAndWatchForNewChildren(this.watcher, this.watcher.assignmentZNode);
}
// Now we can safely claim failover cleanup completed and enable
// ServerShutdownHandler for further processing. The nodes (below)
// in transition, if any, are for regions not related to those
// dead servers at all, and can be done in parallel to SSH.
failoverCleanupDone();
if (!failover) {
// Fresh cluster startup.
LOG.info("Clean cluster startup. Assigning user regions");
assignAllUserRegions(disabledOrDisablingOrEnabling);
}
return failover;
}
/**
* If region is up in zk in transition, then do fixup and block and wait until
* the region is assigned and out of transition. Used on startup for
* catalog regions.
* @param hri Region to look for.
* @return True if we processed a region in transition else false if region
* was not up in zk in transition.
* @throws InterruptedException
* @throws KeeperException
* @throws IOException
*/
boolean processRegionInTransitionAndBlockUntilAssigned(final HRegionInfo hri)
throws InterruptedException, KeeperException, IOException {
String encodedRegionName = hri.getEncodedName();
if (!processRegionInTransition(encodedRegionName, hri)) {
return false; // The region is not in transition
}
LOG.debug("Waiting on " + HRegionInfo.prettyPrint(encodedRegionName));
while (!this.server.isStopped() &&
this.regionStates.isRegionInTransition(encodedRegionName)) {
RegionState state = this.regionStates.getRegionTransitionState(encodedRegionName);
if (state == null || !serverManager.isServerOnline(state.getServerName())) {
// The region is not in transition, or not in transition on an online
// server. Doesn't help to block here any more. Caller needs to
// verify the region is actually assigned.
break;
}
this.regionStates.waitForUpdate(100);
}
return true;
}
/**
* Process failover of new master for region <code>encodedRegionName</code>
* up in zookeeper.
* @param encodedRegionName Region to process failover for.
* @param regionInfo If null we'll go get it from meta table.
* @return True if we processed <code>regionInfo</code> as a RIT.
* @throws KeeperException
* @throws IOException
*/
boolean processRegionInTransition(final String encodedRegionName,
final HRegionInfo regionInfo) throws KeeperException, IOException {
// We need a lock here to ensure that we will not put the same region twice
// It has no reason to be a lock shared with the other operations.
// We can do the lock on the region only, instead of a global lock: what we want to ensure
// is that we don't have two threads working on the same region.
Lock lock = locker.acquireLock(encodedRegionName);
try {
Stat stat = new Stat();
byte [] data = ZKAssign.getDataAndWatch(watcher, encodedRegionName, stat);
if (data == null) return false;
RegionTransition rt;
try {
rt = RegionTransition.parseFrom(data);
} catch (DeserializationException e) {
LOG.warn("Failed parse znode data", e);
return false;
}
HRegionInfo hri = regionInfo;
if (hri == null) {
// The region info is not passed in. We will try to find the region
// from region states map/meta based on the encoded region name. But we
// may not be able to find it. This is valid for online merge that
// the region may have not been created if the merge is not completed.
// Therefore, it is not in meta at master recovery time.
hri = regionStates.getRegionInfo(rt.getRegionName());
EventType et = rt.getEventType();
if (hri == null && et != EventType.RS_ZK_REGION_MERGING
&& et != EventType.RS_ZK_REQUEST_REGION_MERGE) {
LOG.warn("Couldn't find the region in recovering " + rt);
return false;
}
}
return processRegionsInTransition(
rt, hri, stat.getVersion());
} finally {
lock.unlock();
}
}
/**
* This call is invoked only when (1) the master assigns meta, or
* (2) during failover-mode startup, while processing zk assignment nodes.
* The locker is set in the caller. It returns true if the region
* is in transition for sure, false otherwise.
*
* It should be private but it is used by some test too.
*/
boolean processRegionsInTransition(
final RegionTransition rt, final HRegionInfo regionInfo,
final int expectedVersion) throws KeeperException {
EventType et = rt.getEventType();
// Get ServerName. Could not be null.
final ServerName sn = rt.getServerName();
final byte[] regionName = rt.getRegionName();
final String encodedName = HRegionInfo.encodeRegionName(regionName);
final String prettyPrintedRegionName = HRegionInfo.prettyPrint(encodedName);
LOG.info("Processing " + prettyPrintedRegionName + " in state: " + et);
if (regionStates.isRegionInTransition(encodedName)
&& (regionInfo.isMetaRegion() || !useZKForAssignment)) {
LOG.info("Processed region " + prettyPrintedRegionName + " in state: "
+ et + ", does nothing since the region is already in transition "
+ regionStates.getRegionTransitionState(encodedName));
// Just return
return true;
}
if (!serverManager.isServerOnline(sn)) {
// It was transitioning on a dead server, so it's closed now.
// Force to OFFLINE and put it in transition, but not assign it
// since log splitting for the dead server is not done yet.
LOG.debug("RIT " + encodedName + " in state=" + rt.getEventType() +
" was on deadserver; forcing offline");
if (regionStates.isRegionOnline(regionInfo)) {
        // Meta could still show the region as assigned to the previous
        // server. If that server is online, then when we reload meta, the
        // region is put back online; we need to offline it.
regionStates.regionOffline(regionInfo);
sendRegionClosedNotification(regionInfo);
}
// Put it back in transition so that SSH can re-assign it
regionStates.updateRegionState(regionInfo, State.OFFLINE, sn);
if (regionInfo.isMetaRegion()) {
// If it's meta region, reset the meta location.
// So that master knows the right meta region server.
MetaRegionTracker.setMetaLocation(watcher, sn);
} else {
        // Whether the previous server is online or offline, we need to reset
        // the last region server of the region.
regionStates.setLastRegionServerOfRegion(sn, encodedName);
// Make sure we know the server is dead.
if (!serverManager.isServerDead(sn)) {
serverManager.expireServer(sn);
}
}
return false;
}
switch (et) {
case M_ZK_REGION_CLOSING:
// Insert into RIT & resend the query to the region server: may be the previous master
// died before sending the query the first time.
final RegionState rsClosing = regionStates.updateRegionState(rt, State.CLOSING);
this.executorService.submit(
new EventHandler(server, EventType.M_MASTER_RECOVERY) {
@Override
public void process() throws IOException {
ReentrantLock lock = locker.acquireLock(regionInfo.getEncodedName());
try {
unassign(regionInfo, rsClosing, expectedVersion, null, useZKForAssignment, null);
if (regionStates.isRegionOffline(regionInfo)) {
assign(regionInfo, true);
}
} finally {
lock.unlock();
}
}
});
break;
case RS_ZK_REGION_CLOSED:
case RS_ZK_REGION_FAILED_OPEN:
// Region is closed, insert into RIT and handle it
regionStates.updateRegionState(regionInfo, State.CLOSED, sn);
invokeAssign(regionInfo);
break;
case M_ZK_REGION_OFFLINE:
// Insert in RIT and resend to the regionserver
regionStates.updateRegionState(rt, State.PENDING_OPEN);
final RegionState rsOffline = regionStates.getRegionState(regionInfo);
this.executorService.submit(
new EventHandler(server, EventType.M_MASTER_RECOVERY) {
@Override
public void process() throws IOException {
ReentrantLock lock = locker.acquireLock(regionInfo.getEncodedName());
try {
RegionPlan plan = new RegionPlan(regionInfo, null, sn);
addPlan(encodedName, plan);
assign(rsOffline, false, false);
} finally {
lock.unlock();
}
}
});
break;
case RS_ZK_REGION_OPENING:
regionStates.updateRegionState(rt, State.OPENING);
break;
case RS_ZK_REGION_OPENED:
// Region is opened, insert into RIT and handle it
// This could be done asynchronously, we would need then to acquire the lock in the
// handler.
regionStates.updateRegionState(rt, State.OPEN);
new OpenedRegionHandler(server, this, regionInfo, sn, expectedVersion).process();
break;
case RS_ZK_REQUEST_REGION_SPLIT:
case RS_ZK_REGION_SPLITTING:
case RS_ZK_REGION_SPLIT:
      // A splitting region should be online. We could have skipped it during
      // user region rebuilding since we may have considered the split completed.
      // Put it in SPLITTING state to avoid complications.
regionStates.regionOnline(regionInfo, sn);
regionStates.updateRegionState(rt, State.SPLITTING);
if (!handleRegionSplitting(
rt, encodedName, prettyPrintedRegionName, sn)) {
deleteSplittingNode(encodedName, sn);
}
break;
case RS_ZK_REQUEST_REGION_MERGE:
case RS_ZK_REGION_MERGING:
case RS_ZK_REGION_MERGED:
if (!handleRegionMerging(
rt, encodedName, prettyPrintedRegionName, sn)) {
deleteMergingNode(encodedName, sn);
}
break;
default:
throw new IllegalStateException("Received region in state:" + et + " is not valid.");
}
LOG.info("Processed region " + prettyPrintedRegionName + " in state "
+ et + ", on " + (serverManager.isServerOnline(sn) ? "" : "dead ")
+ "server: " + sn);
return true;
}
/**
* When a region is closed, it should be removed from the regionsToReopen
* @param hri HRegionInfo of the region which was closed
*/
public void removeClosedRegion(HRegionInfo hri) {
if (regionsToReopen.remove(hri.getEncodedName()) != null) {
LOG.debug("Removed region from reopening regions because it was closed");
}
}
/**
* Handles various states an unassigned node can be in.
* <p>
* Method is called when a state change is suspected for an unassigned node.
* <p>
* This deals with skipped transitions (we got a CLOSED but didn't see CLOSING
* yet).
* @param rt
* @param expectedVersion
*/
void handleRegion(final RegionTransition rt, int expectedVersion) {
if (rt == null) {
LOG.warn("Unexpected NULL input for RegionTransition rt");
return;
}
final ServerName sn = rt.getServerName();
// Check if this is a special HBCK transition
if (sn.equals(HBCK_CODE_SERVERNAME)) {
handleHBCK(rt);
return;
}
final long createTime = rt.getCreateTime();
final byte[] regionName = rt.getRegionName();
String encodedName = HRegionInfo.encodeRegionName(regionName);
String prettyPrintedRegionName = HRegionInfo.prettyPrint(encodedName);
// Verify this is a known server
if (!serverManager.isServerOnline(sn)
&& !ignoreStatesRSOffline.contains(rt.getEventType())) {
LOG.warn("Attempted to handle region transition for server but " +
"it is not online: " + prettyPrintedRegionName + ", " + rt);
return;
}
RegionState regionState =
regionStates.getRegionState(encodedName);
long startTime = System.currentTimeMillis();
if (LOG.isDebugEnabled()) {
boolean lateEvent = createTime < (startTime - 15000);
LOG.debug("Handling " + rt.getEventType() +
", server=" + sn + ", region=" +
(prettyPrintedRegionName == null ? "null" : prettyPrintedRegionName) +
(lateEvent ? ", which is more than 15 seconds late" : "") +
", current_state=" + regionState);
}
// We don't do anything for this event,
// so separate it out, no need to lock/unlock anything
if (rt.getEventType() == EventType.M_ZK_REGION_OFFLINE) {
return;
}
// We need a lock on the region as we could update it
Lock lock = locker.acquireLock(encodedName);
try {
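      // Re-read the state now that we hold the region lock; it may have changed
      // while we were waiting to acquire the lock.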
RegionState latestState =
regionStates.getRegionState(encodedName);
if ((regionState == null && latestState != null)
|| (regionState != null && latestState == null)
|| (regionState != null && latestState != null
&& latestState.getState() != regionState.getState())) {
LOG.warn("Region state changed from " + regionState + " to "
+ latestState + ", while acquiring lock");
}
long waitedTime = System.currentTimeMillis() - startTime;
if (waitedTime > 5000) {
LOG.warn("Took " + waitedTime + "ms to acquire the lock");
}
regionState = latestState;
switch (rt.getEventType()) {
case RS_ZK_REQUEST_REGION_SPLIT:
case RS_ZK_REGION_SPLITTING:
case RS_ZK_REGION_SPLIT:
if (!handleRegionSplitting(
rt, encodedName, prettyPrintedRegionName, sn)) {
deleteSplittingNode(encodedName, sn);
}
break;
case RS_ZK_REQUEST_REGION_MERGE:
case RS_ZK_REGION_MERGING:
case RS_ZK_REGION_MERGED:
        // The merged region is a new region, so we can't find it in the region states yet.
        // However, the two merging regions are not new; they should be in a merging state.
if (!handleRegionMerging(
rt, encodedName, prettyPrintedRegionName, sn)) {
deleteMergingNode(encodedName, sn);
}
break;
case M_ZK_REGION_CLOSING:
// Should see CLOSING after we have asked it to CLOSE or additional
// times after already being in state of CLOSING
if (regionState == null
|| !regionState.isPendingCloseOrClosingOnServer(sn)) {
LOG.warn("Received CLOSING for " + prettyPrintedRegionName
+ " from " + sn + " but the region isn't PENDING_CLOSE/CLOSING here: "
+ regionStates.getRegionState(encodedName));
return;
}
// Transition to CLOSING (or update stamp if already CLOSING)
regionStates.updateRegionState(rt, State.CLOSING);
break;
case RS_ZK_REGION_CLOSED:
// Should see CLOSED after CLOSING but possible after PENDING_CLOSE
if (regionState == null
|| !regionState.isPendingCloseOrClosingOnServer(sn)) {
LOG.warn("Received CLOSED for " + prettyPrintedRegionName
+ " from " + sn + " but the region isn't PENDING_CLOSE/CLOSING here: "
+ regionStates.getRegionState(encodedName));
return;
}
          // Handle CLOSED by assigning elsewhere, or stopping if this is a disable.
          // If we got here, all is good. We need to update the RegionState -- else
          // what follows will fail because it is not in the expected state.
new ClosedRegionHandler(server, this, regionState.getRegion()).process();
updateClosedRegionHandlerTracker(regionState.getRegion());
break;
case RS_ZK_REGION_FAILED_OPEN:
if (regionState == null
|| !regionState.isPendingOpenOrOpeningOnServer(sn)) {
LOG.warn("Received FAILED_OPEN for " + prettyPrintedRegionName
+ " from " + sn + " but the region isn't PENDING_OPEN/OPENING here: "
+ regionStates.getRegionState(encodedName));
return;
}
AtomicInteger failedOpenCount = failedOpenTracker.get(encodedName);
if (failedOpenCount == null) {
failedOpenCount = new AtomicInteger();
// No need to use putIfAbsent, or extra synchronization since
// this whole handleRegion block is locked on the encoded region
// name, and failedOpenTracker is updated only in this block
failedOpenTracker.put(encodedName, failedOpenCount);
}
if (failedOpenCount.incrementAndGet() >= maximumAttempts) {
regionStates.updateRegionState(rt, State.FAILED_OPEN);
// remove the tracking info to save memory, also reset
// the count for next open initiative
failedOpenTracker.remove(encodedName);
} else {
// Handle this the same as if it were opened and then closed.
regionState = regionStates.updateRegionState(rt, State.CLOSED);
if (regionState != null) {
            // When there is more than one region server, a new RS is selected as the
            // destination and the region plan is updated accordingly (HBASE-5546).
try {
getRegionPlan(regionState.getRegion(), sn, true);
new ClosedRegionHandler(server, this, regionState.getRegion()).process();
} catch (HBaseIOException e) {
LOG.warn("Failed to get region plan", e);
}
}
}
break;
case RS_ZK_REGION_OPENING:
// Should see OPENING after we have asked it to OPEN or additional
// times after already being in state of OPENING
if (regionState == null
|| !regionState.isPendingOpenOrOpeningOnServer(sn)) {
LOG.warn("Received OPENING for " + prettyPrintedRegionName
+ " from " + sn + " but the region isn't PENDING_OPEN/OPENING here: "
+ regionStates.getRegionState(encodedName));
return;
}
// Transition to OPENING (or update stamp if already OPENING)
regionStates.updateRegionState(rt, State.OPENING);
break;
case RS_ZK_REGION_OPENED:
// Should see OPENED after OPENING but possible after PENDING_OPEN.
if (regionState == null
|| !regionState.isPendingOpenOrOpeningOnServer(sn)) {
LOG.warn("Received OPENED for " + prettyPrintedRegionName
+ " from " + sn + " but the region isn't PENDING_OPEN/OPENING here: "
+ regionStates.getRegionState(encodedName));
if (regionState != null) {
// Close it without updating the internal region states,
// so as not to create double assignments in unlucky scenarios
// mentioned in OpenRegionHandler#process
unassign(regionState.getRegion(), null, -1, null, false, sn);
}
return;
}
          // Handle OPENED by removing from transition and deleting the zk node
regionState = regionStates.updateRegionState(rt, State.OPEN);
if (regionState != null) {
failedOpenTracker.remove(encodedName); // reset the count, if any
new OpenedRegionHandler(
server, this, regionState.getRegion(), sn, expectedVersion).process();
updateOpenedRegionHandlerTracker(regionState.getRegion());
}
break;
default:
throw new IllegalStateException("Received event is not valid.");
}
} finally {
lock.unlock();
}
}
//For unit tests only
boolean wasClosedHandlerCalled(HRegionInfo hri) {
AtomicBoolean b = closedRegionHandlerCalled.get(hri);
    // compareAndSet to be sure that unit tests don't see stale values. This means
    // we will return true exactly once, unless the handler code resets the value
    // to true again.
return b == null ? false : b.compareAndSet(true, false);
}
//For unit tests only
boolean wasOpenedHandlerCalled(HRegionInfo hri) {
AtomicBoolean b = openedRegionHandlerCalled.get(hri);
    // compareAndSet to be sure that unit tests don't see stale values. This means
    // we will return true exactly once, unless the handler code resets the value
    // to true again.
return b == null ? false : b.compareAndSet(true, false);
}
//For unit tests only
void initializeHandlerTrackers() {
closedRegionHandlerCalled = new HashMap<HRegionInfo, AtomicBoolean>();
openedRegionHandlerCalled = new HashMap<HRegionInfo, AtomicBoolean>();
}
void updateClosedRegionHandlerTracker(HRegionInfo hri) {
if (closedRegionHandlerCalled != null) { //only for unit tests this is true
closedRegionHandlerCalled.put(hri, new AtomicBoolean(true));
}
}
void updateOpenedRegionHandlerTracker(HRegionInfo hri) {
if (openedRegionHandlerCalled != null) { //only for unit tests this is true
openedRegionHandlerCalled.put(hri, new AtomicBoolean(true));
}
}
  // TODO: processFavoredNodes might throw an exception, e.g., if the
  // meta could not be contacted/updated. We need to see how seriously to treat
  // this problem. Should we fail the current assignment? We should be able
  // to recover from this problem eventually (if the meta couldn't be updated,
  // things should work normally and eventually get fixed up).
void processFavoredNodes(List<HRegionInfo> regions) throws IOException {
if (!shouldAssignRegionsWithFavoredNodes) return;
// The AM gets the favored nodes info for each region and updates the meta
// table with that info
Map<HRegionInfo, List<ServerName>> regionToFavoredNodes =
new HashMap<HRegionInfo, List<ServerName>>();
for (HRegionInfo region : regions) {
regionToFavoredNodes.put(region,
((FavoredNodeLoadBalancer)this.balancer).getFavoredNodes(region));
}
FavoredNodeAssignmentHelper.updateMetaWithFavoredNodesInfo(regionToFavoredNodes, catalogTracker);
}
/**
* Handle a ZK unassigned node transition triggered by HBCK repair tool.
* <p>
* This is handled in a separate code path because it breaks the normal rules.
* @param rt
*/
private void handleHBCK(RegionTransition rt) {
String encodedName = HRegionInfo.encodeRegionName(rt.getRegionName());
LOG.info("Handling HBCK triggered transition=" + rt.getEventType() +
", server=" + rt.getServerName() + ", region=" +
HRegionInfo.prettyPrint(encodedName));
RegionState regionState = regionStates.getRegionTransitionState(encodedName);
switch (rt.getEventType()) {
case M_ZK_REGION_OFFLINE:
HRegionInfo regionInfo;
if (regionState != null) {
regionInfo = regionState.getRegion();
} else {
try {
byte [] name = rt.getRegionName();
Pair<HRegionInfo, ServerName> p = MetaReader.getRegion(catalogTracker, name);
regionInfo = p.getFirst();
} catch (IOException e) {
LOG.info("Exception reading hbase:meta doing HBCK repair operation", e);
return;
}
}
LOG.info("HBCK repair is triggering assignment of region=" +
regionInfo.getRegionNameAsString());
// trigger assign, node is already in OFFLINE so don't need to update ZK
assign(regionInfo, false);
break;
default:
LOG.warn("Received unexpected region state from HBCK: " + rt.toString());
break;
}
}
// ZooKeeper events
/**
* New unassigned node has been created.
*
* <p>This happens when an RS begins the OPENING or CLOSING of a region by
* creating an unassigned node.
*
* <p>When this happens we must:
* <ol>
* <li>Watch the node for further events</li>
* <li>Read and handle the state in the node</li>
* </ol>
*/
@Override
public void nodeCreated(String path) {
handleAssignmentEvent(path);
}
/**
* Existing unassigned node has had data changed.
*
* <p>This happens when an RS transitions from OFFLINE to OPENING, or between
* OPENING/OPENED and CLOSING/CLOSED.
*
* <p>When this happens we must:
* <ol>
* <li>Watch the node for further events</li>
* <li>Read and handle the state in the node</li>
* </ol>
*/
@Override
public void nodeDataChanged(String path) {
handleAssignmentEvent(path);
}
// We don't want to have two events on the same region managed simultaneously.
// For this reason, we need to wait if an event on the same region is currently in progress.
// So we track the region names of the events in progress, and we keep a waiting list.
private final Set<String> regionsInProgress = new HashSet<String>();
// In a LinkedHashMultimap, the put order is kept when we retrieve the collection back. We need
// this as we want the events to be managed in the same order as we received them.
private final LinkedHashMultimap <String, RegionRunnable>
zkEventWorkerWaitingList = LinkedHashMultimap.create();
/**
* A specific runnable that works only on a region.
*/
  private interface RegionRunnable extends Runnable {
/**
* @return - the name of the region it works on.
*/
String getRegionName();
}
/**
   * Submit a task, ensuring that there is only one task at a time working on a given region.
* Order is respected.
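   * <p>A hypothetical usage sketch ({@code handleRegionEvent} is illustrative
   * only, not a real method):
   * <pre>
   * zkEventWorkersSubmit(new RegionRunnable() {
   *   public String getRegionName() { return encodedRegionName; }
   *   public void run() { handleRegionEvent(encodedRegionName); }
   * });
   * </pre>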
*/
protected void zkEventWorkersSubmit(final RegionRunnable regRunnable) {
synchronized (regionsInProgress) {
      // If there is already a task for this region, we add it to the
      // waiting list and return.
      if (regionsInProgress.contains(regRunnable.getRegionName())) {
        synchronized (zkEventWorkerWaitingList) {
          zkEventWorkerWaitingList.put(regRunnable.getRegionName(), regRunnable);
}
return;
}
// No event in progress on this region => we can submit a new task immediately.
regionsInProgress.add(regRunnable.getRegionName());
zkEventWorkers.submit(new Runnable() {
@Override
public void run() {
try {
regRunnable.run();
} finally {
            // Now that we have finished, let's see if there is an event for the same region in the
            // waiting list. If so, we can now submit it to the pool.
synchronized (regionsInProgress) {
regionsInProgress.remove(regRunnable.getRegionName());
synchronized (zkEventWorkerWaitingList) {
java.util.Set<RegionRunnable> waiting = zkEventWorkerWaitingList.get(
regRunnable.getRegionName());
if (!waiting.isEmpty()) {
// We want the first object only. The only way to get it is through an iterator.
RegionRunnable toSubmit = waiting.iterator().next();
zkEventWorkerWaitingList.remove(toSubmit.getRegionName(), toSubmit);
zkEventWorkersSubmit(toSubmit);
}
}
}
}
}
});
}
}
@Override
public void nodeDeleted(final String path) {
if (path.startsWith(watcher.assignmentZNode)) {
final String regionName = ZKAssign.getRegionName(watcher, path);
zkEventWorkersSubmit(new RegionRunnable() {
@Override
public String getRegionName() {
return regionName;
}
@Override
public void run() {
Lock lock = locker.acquireLock(regionName);
try {
RegionState rs = regionStates.getRegionTransitionState(regionName);
if (rs == null) {
rs = regionStates.getRegionState(regionName);
if (rs == null || !rs.isMergingNew()) {
// MergingNew is an offline state
return;
}
}
HRegionInfo regionInfo = rs.getRegion();
String regionNameStr = regionInfo.getRegionNameAsString();
LOG.debug("Znode " + regionNameStr + " deleted, state: " + rs);
boolean disabled = getZKTable().isDisablingOrDisabledTable(regionInfo.getTable());
ServerName serverName = rs.getServerName();
if (serverManager.isServerOnline(serverName)) {
if (rs.isOnServer(serverName)
&& (rs.isOpened() || rs.isSplitting())) {
regionOnline(regionInfo, serverName);
if (disabled) {
                  // If the server is offline, there is no harm in unassigning again
                  LOG.info("Opened " + regionNameStr
                    + " but this table is disabled, triggering close of region");
unassign(regionInfo);
}
} else if (rs.isMergingNew()) {
synchronized (regionStates) {
String p = regionInfo.getEncodedName();
PairOfSameType<HRegionInfo> regions = mergingRegions.get(p);
if (regions != null) {
onlineMergingRegion(disabled, regions.getFirst(), serverName);
onlineMergingRegion(disabled, regions.getSecond(), serverName);
}
}
}
}
} finally {
lock.unlock();
}
}
private void onlineMergingRegion(boolean disabled,
final HRegionInfo hri, final ServerName serverName) {
RegionState regionState = regionStates.getRegionState(hri);
if (regionState != null && regionState.isMerging()
&& regionState.isOnServer(serverName)) {
regionOnline(regionState.getRegion(), serverName);
if (disabled) {
unassign(hri);
}
}
}
});
}
}
/**
* New unassigned node has been created.
*
* <p>This happens when an RS begins the OPENING, SPLITTING or CLOSING of a
* region by creating a znode.
*
* <p>When this happens we must:
* <ol>
* <li>Watch the node for further children changed events</li>
* <li>Watch all new children for changed events</li>
* </ol>
*/
@Override
public void nodeChildrenChanged(String path) {
if (path.equals(watcher.assignmentZNode)) {
zkEventWorkers.submit(new Runnable() {
@Override
public void run() {
try {
// Just make sure we see the changes for the new znodes
List<String> children =
ZKUtil.listChildrenAndWatchForNewChildren(
watcher, watcher.assignmentZNode);
if (children != null) {
Stat stat = new Stat();
for (String child : children) {
                  // If the region is in transition, we already have a watch
                  // on it, so there is no need to watch it again. As far as we
                  // know for now, this is needed only to watch splitting nodes.
if (!regionStates.isRegionInTransition(child)) {
ZKAssign.getDataAndWatch(watcher, child, stat);
}
}
}
} catch (KeeperException e) {
server.abort("Unexpected ZK exception reading unassigned children", e);
}
}
});
}
}
/**
* Marks the region as online. Removes it from regions in transition and
* updates the in-memory assignment information.
* <p>
* Used when a region has been successfully opened on a region server.
* @param regionInfo
* @param sn
*/
void regionOnline(HRegionInfo regionInfo, ServerName sn) {
regionOnline(regionInfo, sn, HConstants.NO_SEQNUM);
}
void regionOnline(HRegionInfo regionInfo, ServerName sn, long openSeqNum) {
numRegionsOpened.incrementAndGet();
regionStates.regionOnline(regionInfo, sn, openSeqNum);
// Remove plan if one.
clearRegionPlan(regionInfo);
// Add the server to serversInUpdatingTimer
addToServersInUpdatingTimer(sn);
balancer.regionOnline(regionInfo, sn);
// Tell our listeners that a region was opened
sendRegionOpenedNotification(regionInfo, sn);
}
/**
* Pass the assignment event to a worker for processing.
* Each worker is a single thread executor service. The reason
* for just one thread is to make sure all events for a given
* region are processed in order.
*
* @param path
*/
private void handleAssignmentEvent(final String path) {
if (path.startsWith(watcher.assignmentZNode)) {
final String regionName = ZKAssign.getRegionName(watcher, path);
zkEventWorkersSubmit(new RegionRunnable() {
@Override
public String getRegionName() {
return regionName;
}
@Override
public void run() {
try {
Stat stat = new Stat();
byte [] data = ZKAssign.getDataAndWatch(watcher, path, stat);
if (data == null) return;
RegionTransition rt = RegionTransition.parseFrom(data);
handleRegion(rt, stat.getVersion());
} catch (KeeperException e) {
server.abort("Unexpected ZK exception reading unassigned node data", e);
} catch (DeserializationException e) {
server.abort("Unexpected exception deserializing node data", e);
}
}
});
}
}
/**
* Add the server to the set serversInUpdatingTimer, then {@link TimerUpdater}
   * will update timers for this server in the background.
* @param sn
*/
private void addToServersInUpdatingTimer(final ServerName sn) {
    if (tomActivated) {
this.serversInUpdatingTimer.add(sn);
}
}
/**
* Touch timers for all regions in transition that have the passed
* <code>sn</code> in common.
   * Call this method whenever a server checks in. Doing so helps the case where
   * a new regionserver has joined the cluster and it's been given 1k regions to
   * open. If this method is tickled every time a region reports a
   * successful open, then the 1k-th region won't be timed out just because it's
   * sitting behind the open of 999 other regions. This method is NOT used
   * as part of bulk assign -- there we have a different mechanism for extending
   * the regions-in-transition timer (we turn it off temporarily, because
   * there is no regionplan involved when bulk assigning).
* @param sn
*/
private void updateTimers(final ServerName sn) {
Preconditions.checkState(tomActivated);
if (sn == null) return;
// This loop could be expensive.
    // First make a copy of the current regionPlans rather than hold sync while
    // looping, because holding sync can cause deadlock. It's ok in this loop
    // if the Map we're going against is a little stale.
List<Map.Entry<String, RegionPlan>> rps;
synchronized(this.regionPlans) {
rps = new ArrayList<Map.Entry<String, RegionPlan>>(regionPlans.entrySet());
}
for (Map.Entry<String, RegionPlan> e : rps) {
if (e.getValue() != null && e.getKey() != null && sn.equals(e.getValue().getDestination())) {
RegionState regionState = regionStates.getRegionTransitionState(e.getKey());
if (regionState != null) {
regionState.updateTimestampToNow();
}
}
}
}
/**
* Marks the region as offline. Removes it from regions in transition and
* removes in-memory assignment information.
* <p>
* Used when a region has been closed and should remain closed.
* @param regionInfo
*/
public void regionOffline(final HRegionInfo regionInfo) {
regionOffline(regionInfo, null);
}
public void offlineDisabledRegion(HRegionInfo regionInfo) {
if (useZKForAssignment) {
// Disabling so should not be reassigned, just delete the CLOSED node
LOG.debug("Table being disabled so deleting ZK node and removing from " +
"regions in transition, skipping assignment of region " +
regionInfo.getRegionNameAsString());
String encodedName = regionInfo.getEncodedName();
deleteNodeInStates(encodedName, "closed", null,
EventType.RS_ZK_REGION_CLOSED, EventType.M_ZK_REGION_OFFLINE);
}
regionOffline(regionInfo);
}
// Assignment methods
/**
* Assigns the specified region.
* <p>
* If a RegionPlan is available with a valid destination then it will be used
* to determine what server region is assigned to. If no RegionPlan is
* available, region will be assigned to a random available server.
* <p>
* Updates the RegionState and sends the OPEN RPC.
* <p>
* This will only succeed if the region is in transition and in a CLOSED or
* OFFLINE state or not in transition (in-memory not zk), and of course, the
* chosen server is up and running (It may have just crashed!). If the
* in-memory checks pass, the zk node is forced to OFFLINE before assigning.
*
   * @param region region to be assigned
* @param setOfflineInZK whether ZK node should be created/transitioned to an
* OFFLINE state before assigning the region
*/
public void assign(HRegionInfo region, boolean setOfflineInZK) {
assign(region, setOfflineInZK, false);
}
/**
* Use care with forceNewPlan. It could cause double assignment.
*/
public void assign(HRegionInfo region,
boolean setOfflineInZK, boolean forceNewPlan) {
if (isDisabledorDisablingRegionInRIT(region)) {
return;
}
if (this.serverManager.isClusterShutdown()) {
LOG.info("Cluster shutdown is set; skipping assign of " +
region.getRegionNameAsString());
return;
}
String encodedName = region.getEncodedName();
Lock lock = locker.acquireLock(encodedName);
try {
RegionState state = forceRegionStateToOffline(region, forceNewPlan);
if (state != null) {
if (regionStates.wasRegionOnDeadServer(encodedName)) {
LOG.info("Skip assigning " + region.getRegionNameAsString()
+ ", it's host " + regionStates.getLastRegionServerOfRegion(encodedName)
+ " is dead but not processed yet");
return;
}
assign(state, setOfflineInZK && useZKForAssignment, forceNewPlan);
}
} finally {
lock.unlock();
}
}
/**
* Bulk assign regions to <code>destination</code>.
* @param destination
* @param regions Regions to assign.
* @return true if successful
*/
boolean assign(final ServerName destination, final List<HRegionInfo> regions) {
long startTime = EnvironmentEdgeManager.currentTimeMillis();
try {
int regionCount = regions.size();
if (regionCount == 0) {
return true;
}
LOG.debug("Assigning " + regionCount + " region(s) to " + destination.toString());
Set<String> encodedNames = new HashSet<String>(regionCount);
for (HRegionInfo region : regions) {
encodedNames.add(region.getEncodedName());
}
List<HRegionInfo> failedToOpenRegions = new ArrayList<HRegionInfo>();
Map<String, Lock> locks = locker.acquireLocks(encodedNames);
try {
AtomicInteger counter = new AtomicInteger(0);
Map<String, Integer> offlineNodesVersions = new ConcurrentHashMap<String, Integer>();
OfflineCallback cb = new OfflineCallback(
watcher, destination, counter, offlineNodesVersions);
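        // The callback records, via the shared counter, how many region znodes have
        // been set OFFLINE asynchronously; the wait loop below blocks on that count.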
Map<String, RegionPlan> plans = new HashMap<String, RegionPlan>(regions.size());
List<RegionState> states = new ArrayList<RegionState>(regions.size());
for (HRegionInfo region : regions) {
String encodedName = region.getEncodedName();
if (!isDisabledorDisablingRegionInRIT(region)) {
RegionState state = forceRegionStateToOffline(region, false);
boolean onDeadServer = false;
if (state != null) {
if (regionStates.wasRegionOnDeadServer(encodedName)) {
LOG.info("Skip assigning " + region.getRegionNameAsString()
+ ", it's host " + regionStates.getLastRegionServerOfRegion(encodedName)
+ " is dead but not processed yet");
onDeadServer = true;
} else if (!useZKForAssignment
|| asyncSetOfflineInZooKeeper(state, cb, destination)) {
RegionPlan plan = new RegionPlan(region, state.getServerName(), destination);
plans.put(encodedName, plan);
states.add(state);
continue;
}
}
// Reassign if the region wasn't on a dead server
if (!onDeadServer) {
LOG.info("failed to force region state to offline or "
+ "failed to set it offline in ZK, will reassign later: " + region);
failedToOpenRegions.add(region); // assign individually later
}
}
// Release the lock, this region is excluded from bulk assign because
// we can't update its state, or set its znode to offline.
Lock lock = locks.remove(encodedName);
lock.unlock();
}
if (useZKForAssignment) {
// Wait until all unassigned nodes have been put up and watchers set.
int total = states.size();
for (int oldCounter = 0; !server.isStopped();) {
int count = counter.get();
if (oldCounter != count) {
LOG.info(destination.toString() + " unassigned znodes=" + count + " of total="
+ total);
oldCounter = count;
}
if (count >= total) break;
Threads.sleep(5);
}
}
if (server.isStopped()) {
return false;
}
        // Add region plans, so we can update timers when a region is opened, which
        // reduces unnecessary timeouts on RIT.
this.addPlans(plans);
List<Triple<HRegionInfo, Integer, List<ServerName>>> regionOpenInfos =
new ArrayList<Triple<HRegionInfo, Integer, List<ServerName>>>(states.size());
for (RegionState state: states) {
HRegionInfo region = state.getRegion();
String encodedRegionName = region.getEncodedName();
Integer nodeVersion = offlineNodesVersions.get(encodedRegionName);
if (useZKForAssignment && (nodeVersion == null || nodeVersion == -1)) {
LOG.warn("failed to offline in zookeeper: " + region);
failedToOpenRegions.add(region); // assign individually later
Lock lock = locks.remove(encodedRegionName);
lock.unlock();
} else {
regionStates.updateRegionState(
region, State.PENDING_OPEN, destination);
List<ServerName> favoredNodes = ServerName.EMPTY_SERVER_LIST;
if (this.shouldAssignRegionsWithFavoredNodes) {
favoredNodes = ((FavoredNodeLoadBalancer)this.balancer).getFavoredNodes(region);
}
regionOpenInfos.add(new Triple<HRegionInfo, Integer, List<ServerName>>(
region, nodeVersion, favoredNodes));
}
}
// Move on to open regions.
try {
          // Send OPEN RPC. If it fails on an IOE or RemoteException,
          // regions will be assigned individually.
long maxWaitTime = System.currentTimeMillis() +
this.server.getConfiguration().
getLong("hbase.regionserver.rpc.startup.waittime", 60000);
for (int i = 1; i <= maximumAttempts && !server.isStopped(); i++) {
try {
// regionOpenInfos is empty if all regions are in failedToOpenRegions list
if (regionOpenInfos.isEmpty()) {
break;
}
List<RegionOpeningState> regionOpeningStateList = serverManager
.sendRegionOpen(destination, regionOpenInfos);
if (regionOpeningStateList == null) {
// Failed getting RPC connection to this server
return false;
}
for (int k = 0, n = regionOpeningStateList.size(); k < n; k++) {
RegionOpeningState openingState = regionOpeningStateList.get(k);
if (openingState != RegionOpeningState.OPENED) {
HRegionInfo region = regionOpenInfos.get(k).getFirst();
if (openingState == RegionOpeningState.ALREADY_OPENED) {
processAlreadyOpenedRegion(region, destination);
} else if (openingState == RegionOpeningState.FAILED_OPENING) {
// Failed opening this region, reassign it later
failedToOpenRegions.add(region);
} else {
LOG.warn("THIS SHOULD NOT HAPPEN: unknown opening state "
+ openingState + " in assigning region " + region);
}
}
}
break;
} catch (IOException e) {
if (e instanceof RemoteException) {
e = ((RemoteException)e).unwrapRemoteException();
}
if (e instanceof RegionServerStoppedException) {
LOG.warn("The region server was shut down, ", e);
// No need to retry, the region server is a goner.
return false;
} else if (e instanceof ServerNotRunningYetException) {
long now = System.currentTimeMillis();
if (now < maxWaitTime) {
LOG.debug("Server is not yet up; waiting up to " +
(maxWaitTime - now) + "ms", e);
Thread.sleep(100);
i--; // reset the try count
continue;
}
} else if (e instanceof java.net.SocketTimeoutException
&& this.serverManager.isServerOnline(destination)) {
// In case socket is timed out and the region server is still online,
// the openRegion RPC could have been accepted by the server and
// just the response didn't go through. So we will retry to
// open the region on the same server.
if (LOG.isDebugEnabled()) {
LOG.debug("Bulk assigner openRegion() to " + destination
+ " has timed out, but the regions might"
+ " already be opened on it.", e);
}
              // Wait and reset the retry count; the server might just be busy.
Thread.sleep(100);
i--;
continue;
}
throw e;
}
}
} catch (IOException e) {
// Can be a socket timeout, EOF, NoRouteToHost, etc
LOG.info("Unable to communicate with " + destination
+ " in order to assign regions, ", e);
return false;
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
} finally {
for (Lock lock : locks.values()) {
lock.unlock();
}
}
if (!failedToOpenRegions.isEmpty()) {
for (HRegionInfo region : failedToOpenRegions) {
if (!regionStates.isRegionOnline(region)) {
invokeAssign(region);
}
}
}
LOG.debug("Bulk assigning done for " + destination);
return true;
} finally {
metricsAssignmentManager.updateBulkAssignTime(EnvironmentEdgeManager.currentTimeMillis() - startTime);
}
}
/**
   * Send a CLOSE RPC if the server is online; otherwise, offline the region.
   *
   * The RPC will be sent only to the region server found in the region state
   * if it is passed in; otherwise, to the src server specified. If the region
   * state is not specified, we don't update the region state at all: instead
   * we just send the RPC call. This is useful for some cleanup without
   * messing around with the region states (see handleRegion, on the "region
   * opened on an unexpected server" scenario, for an example).
*/
private void unassign(final HRegionInfo region,
final RegionState state, final int versionOfClosingNode,
final ServerName dest, final boolean transitionInZK,
final ServerName src) {
ServerName server = src;
if (state != null) {
server = state.getServerName();
}
long maxWaitTime = -1;
for (int i = 1; i <= this.maximumAttempts; i++) {
if (this.server.isStopped() || this.server.isAborted()) {
LOG.debug("Server stopped/aborted; skipping unassign of " + region);
return;
}
      // ClosedRegionHandler can remove the server from this.regions
if (!serverManager.isServerOnline(server)) {
LOG.debug("Offline " + region.getRegionNameAsString()
+ ", no need to unassign since it's on a dead server: " + server);
if (transitionInZK) {
          // Delete the node; if no node exists, we need not bother.
deleteClosingOrClosedNode(region, server);
}
if (state != null) {
regionOffline(region);
}
return;
}
try {
// Send CLOSE RPC
if (serverManager.sendRegionClose(server, region,
versionOfClosingNode, dest, transitionInZK)) {
LOG.debug("Sent CLOSE to " + server + " for region " +
region.getRegionNameAsString());
if (useZKForAssignment && !transitionInZK && state != null) {
// Retry to make sure the region is
// closed so as to avoid double assignment.
unassign(region, state, versionOfClosingNode,
dest, transitionInZK, src);
}
return;
}
        // This never happens. Currently regionserver close always returns true.
        // TODO: this can now happen (0.96) if there is an exception in a coprocessor.
LOG.warn("Server " + server + " region CLOSE RPC returned false for " +
region.getRegionNameAsString());
} catch (Throwable t) {
if (t instanceof RemoteException) {
t = ((RemoteException)t).unwrapRemoteException();
}
boolean logRetries = true;
if (t instanceof NotServingRegionException
|| t instanceof RegionServerStoppedException
|| t instanceof ServerNotRunningYetException) {
LOG.debug("Offline " + region.getRegionNameAsString()
+ ", it's not any more on " + server, t);
if (transitionInZK) {
deleteClosingOrClosedNode(region, server);
}
if (state != null) {
regionOffline(region);
}
return;
} else if ((t instanceof FailedServerException) || (state != null &&
t instanceof RegionAlreadyInTransitionException)) {
long sleepTime = 0;
Configuration conf = this.server.getConfiguration();
if(t instanceof FailedServerException) {
sleepTime = 1 + conf.getInt(RpcClient.FAILED_SERVER_EXPIRY_KEY,
RpcClient.FAILED_SERVER_EXPIRY_DEFAULT);
} else {
            // The RS is already processing this region; we only need to update the timestamp
            LOG.debug("Updating the timestamp for " + state);
state.updateTimestampToNow();
if (maxWaitTime < 0) {
maxWaitTime =
EnvironmentEdgeManager.currentTimeMillis()
+ conf.getLong(ALREADY_IN_TRANSITION_WAITTIME,
DEFAULT_ALREADY_IN_TRANSITION_WAITTIME);
}
long now = EnvironmentEdgeManager.currentTimeMillis();
if (now < maxWaitTime) {
LOG.debug("Region is already in transition; "
+ "waiting up to " + (maxWaitTime - now) + "ms", t);
sleepTime = 100;
i--; // reset the try count
logRetries = false;
}
}
try {
if (sleepTime > 0) {
Thread.sleep(sleepTime);
}
} catch (InterruptedException ie) {
LOG.warn("Failed to unassign "
+ region.getRegionNameAsString() + " since interrupted", ie);
Thread.currentThread().interrupt();
if (!tomActivated && state != null) {
regionStates.updateRegionState(region, State.FAILED_CLOSE);
}
return;
}
}
if (logRetries) {
LOG.info("Server " + server + " returned " + t + " for "
+ region.getRegionNameAsString() + ", try=" + i
+ " of " + this.maximumAttempts, t);
// Presume retry or server will expire.
}
}
}
// Run out of attempts
if (!tomActivated && state != null) {
regionStates.updateRegionState(region, State.FAILED_CLOSE);
}
}
/**
* Set region to OFFLINE unless it is opening and forceNewPlan is false.
*/
private RegionState forceRegionStateToOffline(
final HRegionInfo region, final boolean forceNewPlan) {
RegionState state = regionStates.getRegionState(region);
if (state == null) {
LOG.warn("Assigning a region not in region states: " + region);
state = regionStates.createRegionState(region);
}
ServerName sn = state.getServerName();
if (forceNewPlan && LOG.isDebugEnabled()) {
LOG.debug("Force region state offline " + state);
}
switch (state.getState()) {
case OPEN:
case OPENING:
case PENDING_OPEN:
case CLOSING:
case PENDING_CLOSE:
if (!forceNewPlan) {
LOG.debug("Skip assigning " +
region + ", it is already " + state);
return null;
}
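      // Fall through when forceNewPlan is set: try to unassign the region first.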
case FAILED_CLOSE:
case FAILED_OPEN:
unassign(region, state, -1, null, false, null);
state = regionStates.getRegionState(region);
if (state.isFailedClose()) {
// If we can't close the region, we can't re-assign
// it so as to avoid possible double assignment/data loss.
LOG.info("Skip assigning " +
region + ", we couldn't close it: " + state);
return null;
}
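      // Fall through to the OFFLINE case below.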
case OFFLINE:
      // This region could have been open on this server
      // for a while. If the server is dead and not yet processed,
      // we can move on only if meta shows that the region is actually
      // not on this server, or is on a server that is not dead, or is
      // on one that is dead and already processed.
      // When not using ZK, we don't need this check because we have
      // the latest info in memory, and the caller will do another
      // round of checking anyway.
if (useZKForAssignment
&& regionStates.isServerDeadAndNotProcessed(sn)
&& wasRegionOnDeadServerByMeta(region, sn)) {
if (!regionStates.isRegionInTransition(region)) {
LOG.info("Updating the state to " + State.OFFLINE + " to allow to be reassigned by SSH");
regionStates.updateRegionState(region, State.OFFLINE);
}
LOG.info("Skip assigning " + region.getRegionNameAsString()
+ ", it is on a dead but not processed yet server: " + sn);
return null;
}
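      // Fall through to CLOSED: the region can be assigned.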
case CLOSED:
break;
default:
LOG.error("Trying to assign region " + region
+ ", which is " + state);
return null;
}
return state;
}
private boolean wasRegionOnDeadServerByMeta(
final HRegionInfo region, final ServerName sn) {
try {
if (region.isMetaRegion()) {
ServerName server = catalogTracker.getMetaLocation();
return regionStates.isServerDeadAndNotProcessed(server);
}
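      // For a user region, look up its last known location in hbase:meta.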
while (!server.isStopped()) {
try {
catalogTracker.waitForMeta();
Result r = MetaReader.getRegionResult(catalogTracker, region.getRegionName());
if (r == null || r.isEmpty()) return false;
ServerName server = HRegionInfo.getServerName(r);
return regionStates.isServerDeadAndNotProcessed(server);
} catch (IOException ioe) {
LOG.info("Received exception accessing hbase:meta during force assign "
+ region.getRegionNameAsString() + ", retrying", ioe);
}
}
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
LOG.info("Interrupted accessing hbase:meta", e);
}
// Call is interrupted or server is stopped.
return regionStates.isServerDeadAndNotProcessed(sn);
}
/**
* Caller must hold lock on the passed <code>state</code> object.
* @param state
* @param setOfflineInZK
* @param forceNewPlan
*/
private void assign(RegionState state,
final boolean setOfflineInZK, final boolean forceNewPlan) {
long startTime = EnvironmentEdgeManager.currentTimeMillis();
try {
Configuration conf = server.getConfiguration();
RegionState currentState = state;
int versionOfOfflineNode = -1;
RegionPlan plan = null;
long maxWaitTime = -1;
HRegionInfo region = state.getRegion();
RegionOpeningState regionOpenState;
Throwable previousException = null;
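      // Retry loop: each iteration attempts one OPEN RPC. For transient failures
      // (server starting up, region already in transition, socket timeout with the
      // server still online) the attempt counter is decremented, so the try is free.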
for (int i = 1; i <= maximumAttempts; i++) {
if (server.isStopped() || server.isAborted()) {
LOG.info("Skip assigning " + region.getRegionNameAsString()
+ ", the server is stopped/aborted");
return;
}
if (plan == null) { // Get a server for the region at first
try {
plan = getRegionPlan(region, forceNewPlan);
} catch (HBaseIOException e) {
LOG.warn("Failed to get region plan", e);
}
}
if (plan == null) {
LOG.warn("Unable to determine a plan to assign " + region);
          if (tomActivated) {
this.timeoutMonitor.setAllRegionServersOffline(true);
} else {
if (region.isMetaRegion()) {
try {
Thread.sleep(this.sleepTimeBeforeRetryingMetaAssignment);
if (i == maximumAttempts) i = 1;
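              // hbase:meta assignment is never given up on: resetting the counter
              // above keeps the loop from running out of attempts.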
continue;
} catch (InterruptedException e) {
LOG.error("Got exception while waiting for hbase:meta assignment");
Thread.currentThread().interrupt();
}
}
regionStates.updateRegionState(region, State.FAILED_OPEN);
}
return;
}
if (setOfflineInZK && versionOfOfflineNode == -1) {
// get the version of the znode after setting it to OFFLINE.
// versionOfOfflineNode will be -1 if the znode was not set to OFFLINE
versionOfOfflineNode = setOfflineInZooKeeper(currentState, plan.getDestination());
if (versionOfOfflineNode != -1) {
if (isDisabledorDisablingRegionInRIT(region)) {
return;
}
            // In case of assignment from EnableTableHandler, the table state is ENABLING. Anyhow,
            // EnableTableHandler will set it to ENABLED after assigning all the table regions. If we
            // tried to set it to ENABLED directly, then the client API might think the table is enabled.
            // When all the regions are added directly into hbase:meta and we then call
            // assignRegion, we need to make the table ENABLED ourselves. Hence, in such a case, the
            // table will not be in the ENABLING or ENABLED state.
TableName tableName = region.getTable();
if (!zkTable.isEnablingTable(tableName) && !zkTable.isEnabledTable(tableName)) {
LOG.debug("Setting table " + tableName + " to ENABLED state.");
setEnabledTable(tableName);
}
}
}
if (setOfflineInZK && versionOfOfflineNode == -1) {
LOG.info("Unable to set offline in ZooKeeper to assign " + region);
          // Setting offline in ZK must have failed due to a ZK race or some
          // exception which may make the server abort. If it is a ZK race,
          // we should retry: since we already reset the region state, any
          // existing (re)assignment will fail anyway.
if (!server.isAborted()) {
continue;
}
}
LOG.info("Assigning " + region.getRegionNameAsString() +
" to " + plan.getDestination().toString());
// Transition RegionState to PENDING_OPEN
currentState = regionStates.updateRegionState(region,
State.PENDING_OPEN, plan.getDestination());
boolean needNewPlan;
final String assignMsg = "Failed assignment of " + region.getRegionNameAsString() +
" to " + plan.getDestination();
try {
List<ServerName> favoredNodes = ServerName.EMPTY_SERVER_LIST;
if (this.shouldAssignRegionsWithFavoredNodes) {
favoredNodes = ((FavoredNodeLoadBalancer)this.balancer).getFavoredNodes(region);
}
regionOpenState = serverManager.sendRegionOpen(
plan.getDestination(), region, versionOfOfflineNode, favoredNodes);
if (regionOpenState == RegionOpeningState.FAILED_OPENING) {
// Failed opening this region, looping again on a new server.
needNewPlan = true;
LOG.warn(assignMsg + ", regionserver says 'FAILED_OPENING', " +
" trying to assign elsewhere instead; " +
"try=" + i + " of " + this.maximumAttempts);
} else {
// we're done
if (regionOpenState == RegionOpeningState.ALREADY_OPENED) {
processAlreadyOpenedRegion(region, plan.getDestination());
}
return;
}
} catch (Throwable t) {
if (t instanceof RemoteException) {
t = ((RemoteException) t).unwrapRemoteException();
}
previousException = t;
              // Should we wait a little before retrying? If the server is starting, yes.
              // If the region is already in transition, yes as well: we want to be sure that
              // the region will get opened, but we don't want a double assignment.
boolean hold = (t instanceof RegionAlreadyInTransitionException ||
t instanceof ServerNotRunningYetException);
// In case socket is timed out and the region server is still online,
// the openRegion RPC could have been accepted by the server and
// just the response didn't go through. So we will retry to
// open the region on the same server to avoid possible
// double assignment.
boolean retry = !hold && (t instanceof java.net.SocketTimeoutException
&& this.serverManager.isServerOnline(plan.getDestination()));
if (hold) {
LOG.warn(assignMsg + ", waiting a little before trying on the same region server " +
"try=" + i + " of " + this.maximumAttempts, t);
if (maxWaitTime < 0) {
if (t instanceof RegionAlreadyInTransitionException) {
maxWaitTime = EnvironmentEdgeManager.currentTimeMillis()
+ this.server.getConfiguration().getLong(ALREADY_IN_TRANSITION_WAITTIME,
DEFAULT_ALREADY_IN_TRANSITION_WAITTIME);
} else {
maxWaitTime = EnvironmentEdgeManager.currentTimeMillis()
+ this.server.getConfiguration().getLong(
"hbase.regionserver.rpc.startup.waittime", 60000);
}
}
try {
needNewPlan = false;
long now = EnvironmentEdgeManager.currentTimeMillis();
if (now < maxWaitTime) {
LOG.debug("Server is not yet up or region is already in transition; "
+ "waiting up to " + (maxWaitTime - now) + "ms", t);
Thread.sleep(100);
i--; // reset the try count
} else if (!(t instanceof RegionAlreadyInTransitionException)) {
LOG.debug("Server is not up for a while; try a new one", t);
needNewPlan = true;
}
} catch (InterruptedException ie) {
LOG.warn("Failed to assign "
+ region.getRegionNameAsString() + " since interrupted", ie);
Thread.currentThread().interrupt();
if (!tomActivated) {
regionStates.updateRegionState(region, State.FAILED_OPEN);
}
return;
}
} else if (retry) {
needNewPlan = false;
i--; // we want to retry as many times as needed as long as the RS is not dead.
LOG.warn(assignMsg + ", trying to assign to the same region server due ", t);
} else {
needNewPlan = true;
LOG.warn(assignMsg + ", trying to assign elsewhere instead;" +
" try=" + i + " of " + this.maximumAttempts, t);
}
}
if (i == this.maximumAttempts) {
// Don't reset the region state or get a new plan any more.
// This is the last try.
continue;
}
        // If the region opened on the destination of the present plan, reassigning to a new
        // RS may cause double assignments. In case of RegionAlreadyInTransitionException,
        // we reassign to the same RS.
if (needNewPlan) {
// Force a new plan and reassign. Will return null if no servers.
// The new plan could be the same as the existing plan since we don't
// exclude the server of the original plan, which should not be
// excluded since it could be the only server up now.
RegionPlan newPlan = null;
try {
newPlan = getRegionPlan(region, true);
} catch (HBaseIOException e) {
LOG.warn("Failed to get region plan", e);
}
if (newPlan == null) {
if (tomActivated) {
this.timeoutMonitor.setAllRegionServersOffline(true);
} else {
regionStates.updateRegionState(region, State.FAILED_OPEN);
}
LOG.warn("Unable to find a viable location to assign region " +
region.getRegionNameAsString());
return;
}
if (plan != newPlan && !plan.getDestination().equals(newPlan.getDestination())) {
            // Clean out the plan we failed to execute; it doesn't look like
            // it'll succeed anyway. We need a new plan!
// Transition back to OFFLINE
currentState = regionStates.updateRegionState(region, State.OFFLINE);
versionOfOfflineNode = -1;
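            // Resetting to -1 makes the next loop iteration recreate the OFFLINE
            // znode for the new destination (see the setOfflineInZK check above).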
plan = newPlan;
} else if(plan.getDestination().equals(newPlan.getDestination()) &&
previousException instanceof FailedServerException) {
try {
LOG.info("Trying to re-assign " + region.getRegionNameAsString() +
" to the same failed server.");
Thread.sleep(1 + conf.getInt(RpcClient.FAILED_SERVER_EXPIRY_KEY,
RpcClient.FAILED_SERVER_EXPIRY_DEFAULT));
} catch (InterruptedException ie) {
LOG.warn("Failed to assign "
+ region.getRegionNameAsString() + " since interrupted", ie);
Thread.currentThread().interrupt();
if (!tomActivated) {
regionStates.updateRegionState(region, State.FAILED_OPEN);
}
return;
}
}
}
}
// Run out of attempts
if (!tomActivated) {
regionStates.updateRegionState(region, State.FAILED_OPEN);
}
} finally {
metricsAssignmentManager.updateAssignmentTime(EnvironmentEdgeManager.currentTimeMillis() - startTime);
}
}
private void processAlreadyOpenedRegion(HRegionInfo region, ServerName sn) {
    // Remove the region from in-memory transition and the unassigned node from ZK.
    // This happens, for example, when the regions of a table being enabled were
    // already open on the target server.
LOG.debug("ALREADY_OPENED " + region.getRegionNameAsString()
+ " to " + sn);
String encodedName = region.getEncodedName();
deleteNodeInStates(encodedName, "offline", sn, EventType.M_ZK_REGION_OFFLINE);
regionStates.regionOnline(region, sn);
}
private boolean isDisabledorDisablingRegionInRIT(final HRegionInfo region) {
TableName tableName = region.getTable();
boolean disabled = this.zkTable.isDisabledTable(tableName);
if (disabled || this.zkTable.isDisablingTable(tableName)) {
LOG.info("Table " + tableName + (disabled ? " disabled;" : " disabling;") +
" skipping assign of " + region.getRegionNameAsString());
offlineDisabledRegion(region);
return true;
}
return false;
}
/**
* Set region as OFFLINED up in zookeeper
*
* @param state
* @return the version of the offline node if setting of the OFFLINE node was
* successful, -1 otherwise.
*/
private int setOfflineInZooKeeper(final RegionState state, final ServerName destination) {
if (!state.isClosed() && !state.isOffline()) {
String msg = "Unexpected state : " + state + " .. Cannot transit it to OFFLINE.";
this.server.abort(msg, new IllegalStateException(msg));
return -1;
}
regionStates.updateRegionState(state.getRegion(), State.OFFLINE);
int versionOfOfflineNode;
try {
// get the version after setting the znode to OFFLINE
versionOfOfflineNode = ZKAssign.createOrForceNodeOffline(watcher,
state.getRegion(), destination);
if (versionOfOfflineNode == -1) {
LOG.warn("Attempted to create/force node into OFFLINE state before "
+ "completing assignment but failed to do so for " + state);
return -1;
}
} catch (KeeperException e) {
server.abort("Unexpected ZK exception creating/setting node OFFLINE", e);
return -1;
}
return versionOfOfflineNode;
}
/**
* @param region the region to assign
* @return Plan for passed <code>region</code> (If none currently, it creates one or
* if no servers to assign, it returns null).
*/
private RegionPlan getRegionPlan(final HRegionInfo region,
final boolean forceNewPlan) throws HBaseIOException {
return getRegionPlan(region, null, forceNewPlan);
}
/**
   * @param region the region to assign
   * @param serverToExclude Server to exclude (we know it's bad). Pass null if
   * all servers are thought to be assignable.
   * @param forceNewPlan If true, then even if an existing plan exists, a new plan
   * will be generated.
   * @return Plan for the passed <code>region</code> (if none exists currently,
   *   one is created; if there are no servers to assign to, null is returned).
*/
private RegionPlan getRegionPlan(final HRegionInfo region,
final ServerName serverToExclude, final boolean forceNewPlan) throws HBaseIOException {
    // Pick up an existing plan or make a new one
final String encodedName = region.getEncodedName();
final List<ServerName> destServers =
serverManager.createDestinationServersList(serverToExclude);
if (destServers.isEmpty()){
LOG.warn("Can't move " + encodedName +
", there is no destination server available.");
return null;
}
RegionPlan randomPlan = null;
boolean newPlan = false;
RegionPlan existingPlan;
synchronized (this.regionPlans) {
existingPlan = this.regionPlans.get(encodedName);
if (existingPlan != null && existingPlan.getDestination() != null) {
LOG.debug("Found an existing plan for " + region.getRegionNameAsString()
+ " destination server is " + existingPlan.getDestination() +
" accepted as a dest server = " + destServers.contains(existingPlan.getDestination()));
}
if (forceNewPlan
|| existingPlan == null
|| existingPlan.getDestination() == null
|| !destServers.contains(existingPlan.getDestination())) {
newPlan = true;
randomPlan = new RegionPlan(region, null,
balancer.randomAssignment(region, destServers));
if (!region.isMetaTable() && shouldAssignRegionsWithFavoredNodes) {
List<HRegionInfo> regions = new ArrayList<HRegionInfo>(1);
regions.add(region);
try {
processFavoredNodes(regions);
} catch (IOException ie) {
LOG.warn("Ignoring exception in processFavoredNodes " + ie);
}
}
this.regionPlans.put(encodedName, randomPlan);
}
}
if (newPlan) {
if (randomPlan.getDestination() == null) {
LOG.warn("Can't find a destination for " + encodedName);
return null;
}
LOG.debug("No previous transition plan found (or ignoring " +
"an existing plan) for " + region.getRegionNameAsString() +
"; generated random plan=" + randomPlan + "; " +
serverManager.countOfRegionServers() +
" (online=" + serverManager.getOnlineServers().size() +
", available=" + destServers.size() + ") available servers" +
", forceNewPlan=" + forceNewPlan);
return randomPlan;
}
LOG.debug("Using pre-existing plan for " +
region.getRegionNameAsString() + "; plan=" + existingPlan);
return existingPlan;
}
/**
* Unassigns the specified region.
* <p>
* Updates the RegionState and sends the CLOSE RPC unless region is being
* split by regionserver; then the unassign fails (silently) because we
   * presume the region being unassigned no longer exists (it's been split out
* of existence). TODO: What to do if split fails and is rolled back and
* parent is revivified?
* <p>
* If a RegionPlan is already set, it will remain.
*
   * @param region region to be unassigned
*/
public void unassign(HRegionInfo region) {
unassign(region, false);
}
/**
* Unassigns the specified region.
* <p>
* Updates the RegionState and sends the CLOSE RPC unless region is being
* split by regionserver; then the unassign fails (silently) because we
   * presume the region being unassigned no longer exists (it's been split out
* of existence). TODO: What to do if split fails and is rolled back and
* parent is revivified?
* <p>
* If a RegionPlan is already set, it will remain.
*
   * @param region region to be unassigned
* @param force if region should be closed even if already closing
*/
public void unassign(HRegionInfo region, boolean force, ServerName dest) {
// TODO: Method needs refactoring. Ugly buried returns throughout. Beware!
LOG.debug("Starting unassign of " + region.getRegionNameAsString()
+ " (offlining), current state: " + regionStates.getRegionState(region));
String encodedName = region.getEncodedName();
// Grab the state of this region and synchronize on it
int versionOfClosingNode = -1;
// We need a lock here as we're going to do a put later and we don't want multiple states
// creation
ReentrantLock lock = locker.acquireLock(encodedName);
RegionState state = regionStates.getRegionTransitionState(encodedName);
boolean reassign = true;
try {
if (state == null) {
// Region is not in transition.
// We can unassign it only if it's not SPLIT/MERGED.
state = regionStates.getRegionState(encodedName);
if (state != null && state.isUnassignable()) {
LOG.info("Attempting to unassign " + state + ", ignored");
// Offline region will be reassigned below
return;
}
// Create the znode in CLOSING state
try {
if (state == null || state.getServerName() == null) {
// We don't know where the region is, offline it.
// No need to send CLOSE RPC
LOG.warn("Attempting to unassign a region not in RegionStates"
+ region.getRegionNameAsString() + ", offlined");
regionOffline(region);
return;
}
if (useZKForAssignment) {
versionOfClosingNode = ZKAssign.createNodeClosing(
watcher, region, state.getServerName());
if (versionOfClosingNode == -1) {
LOG.info("Attempting to unassign " +
region.getRegionNameAsString() + " but ZK closing node "
+ "can't be created.");
reassign = false; // not unassigned at all
return;
}
}
} catch (KeeperException e) {
if (e instanceof NodeExistsException) {
// Handle race between master initiated close and regionserver
// orchestrated splitting. See if existing node is in a
// SPLITTING or SPLIT state. If so, the regionserver started
// an op on node before we could get our CLOSING in. Deal.
NodeExistsException nee = (NodeExistsException)e;
String path = nee.getPath();
try {
if (isSplitOrSplittingOrMergedOrMerging(path)) {
LOG.debug(path + " is SPLIT or SPLITTING or MERGED or MERGING; " +
"skipping unassign because region no longer exists -- its split or merge");
reassign = false; // no need to reassign for split/merged region
return;
}
} catch (KeeperException.NoNodeException ke) {
LOG.warn("Failed getData on SPLITTING/SPLIT at " + path +
"; presuming split and that the region to unassign, " +
encodedName + ", no longer exists -- confirm", ke);
return;
} catch (KeeperException ke) {
LOG.error("Unexpected zk state", ke);
} catch (DeserializationException de) {
LOG.error("Failed parse", de);
}
}
// If we get here, we don't understand what's going on -- abort.
server.abort("Unexpected ZK exception creating node CLOSING", e);
reassign = false; // heading out already
return;
}
state = regionStates.updateRegionState(region, State.PENDING_CLOSE);
} else if (state.isFailedOpen()) {
// The region is not open yet
regionOffline(region);
return;
} else if (force && state.isPendingCloseOrClosing()) {
LOG.debug("Attempting to unassign " + region.getRegionNameAsString() +
" which is already " + state.getState() +
" but forcing to send a CLOSE RPC again ");
if (state.isFailedClose()) {
state = regionStates.updateRegionState(region, State.PENDING_CLOSE);
}
state.updateTimestampToNow();
} else {
LOG.debug("Attempting to unassign " +
region.getRegionNameAsString() + " but it is " +
"already in transition (" + state.getState() + ", force=" + force + ")");
return;
}
unassign(region, state, versionOfClosingNode, dest, useZKForAssignment, null);
} finally {
lock.unlock();
// Region is expected to be reassigned afterwards
if (reassign && regionStates.isRegionOffline(region)) {
assign(region, true);
}
}
}
public void unassign(HRegionInfo region, boolean force) {
unassign(region, force, null);
}
/**
* @param region regioninfo of znode to be deleted.
*/
public void deleteClosingOrClosedNode(HRegionInfo region, ServerName sn) {
String encodedName = region.getEncodedName();
deleteNodeInStates(encodedName, "closing", sn, EventType.M_ZK_REGION_CLOSING,
EventType.RS_ZK_REGION_CLOSED);
}
/**
* @param path
* @return True if znode is in SPLIT or SPLITTING or MERGED or MERGING state.
* @throws KeeperException Can happen if the znode went away in meantime.
* @throws DeserializationException
*/
private boolean isSplitOrSplittingOrMergedOrMerging(final String path)
throws KeeperException, DeserializationException {
boolean result = false;
// This may fail if the SPLIT or SPLITTING or MERGED or MERGING znode gets
// cleaned up before we can get data from it.
byte [] data = ZKAssign.getData(watcher, path);
if (data == null) {
LOG.info("Node " + path + " is gone");
return false;
}
RegionTransition rt = RegionTransition.parseFrom(data);
switch (rt.getEventType()) {
case RS_ZK_REQUEST_REGION_SPLIT:
case RS_ZK_REGION_SPLIT:
case RS_ZK_REGION_SPLITTING:
case RS_ZK_REQUEST_REGION_MERGE:
case RS_ZK_REGION_MERGED:
case RS_ZK_REGION_MERGING:
result = true;
break;
default:
LOG.info("Node " + path + " is in " + rt.getEventType());
break;
}
return result;
}
/**
* Used by unit tests. Return the number of regions opened so far in the life
* of the master. Increases by one every time the master opens a region
* @return the counter value of the number of regions opened so far
*/
public int getNumRegionsOpened() {
return numRegionsOpened.get();
}
/**
* Waits until the specified region has completed assignment.
* <p>
* If the region is already assigned, returns immediately. Otherwise, method
* blocks until the region is assigned.
* @param regionInfo region to wait on assignment for
* @throws InterruptedException
*/
public boolean waitForAssignment(HRegionInfo regionInfo)
throws InterruptedException {
while (!regionStates.isRegionOnline(regionInfo)) {
if (regionStates.isRegionInState(regionInfo, State.FAILED_OPEN)
|| this.server.isStopped()) {
return false;
}
// We should receive a notification, but it's
// better to have a timeout to recheck the condition here:
// it lowers the impact of a race condition if any
regionStates.waitForUpdate(100);
}
return true;
}
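  // Illustrative usage (sketch): a typical synchronous assign-and-wait
  // sequence, assuming `am` is a live AssignmentManager and `hri` a region:
  //
  //   am.assign(hri, true);
  //   if (!am.waitForAssignment(hri)) {
  //     // region failed to open, or the master is stopping
  //   }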
/**
* Assigns the hbase:meta region.
* <p>
* Assumes that hbase:meta is currently closed and is not being actively served by
* any RegionServer.
* <p>
* Forcibly unsets the current meta region location in ZooKeeper and assigns
* hbase:meta to a random RegionServer.
* @throws KeeperException
*/
public void assignMeta() throws KeeperException {
MetaRegionTracker.deleteMetaLocation(this.watcher);
assign(HRegionInfo.FIRST_META_REGIONINFO, true);
}
/**
* Assigns specified regions retaining assignments, if any.
* <p>
* This is a synchronous call and will return once every region has been
* assigned. If anything fails, an exception is thrown
* @throws InterruptedException
* @throws IOException
*/
public void assign(Map<HRegionInfo, ServerName> regions)
throws IOException, InterruptedException {
if (regions == null || regions.isEmpty()) {
return;
}
List<ServerName> servers = serverManager.createDestinationServersList();
if (servers == null || servers.isEmpty()) {
throw new IOException("Found no destination server to assign region(s)");
}
// Reuse existing assignment info
Map<ServerName, List<HRegionInfo>> bulkPlan =
balancer.retainAssignment(regions, servers);
assign(regions.size(), servers.size(),
"retainAssignment=true", bulkPlan);
}
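  // Illustrative usage (sketch): retaining prior locations at startup,
  // assuming `lastAssignments` was read from a snapshot of hbase:meta:
  //
  //   Map<HRegionInfo, ServerName> lastAssignments = ...;
  //   am.assign(lastAssignments); // routes through balancer.retainAssignment()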
/**
* Assigns specified regions round robin, if any.
* <p>
* This is a synchronous call and will return once every region has been
* assigned. If anything fails, an exception is thrown
* @throws InterruptedException
* @throws IOException
*/
public void assign(List<HRegionInfo> regions)
throws IOException, InterruptedException {
if (regions == null || regions.isEmpty()) {
return;
}
List<ServerName> servers = serverManager.createDestinationServersList();
if (servers == null || servers.isEmpty()) {
throw new IOException("Found no destination server to assign region(s)");
}
// Generate a round-robin bulk assignment plan
Map<ServerName, List<HRegionInfo>> bulkPlan
= balancer.roundRobinAssignment(regions, servers);
processFavoredNodes(regions);
assign(regions.size(), servers.size(),
"round-robin=true", bulkPlan);
}
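  // Illustrative usage (sketch): round-robin assignment of freshly created
  // regions, e.g. after a table creation:
  //
  //   List<HRegionInfo> newRegions = ...;
  //   am.assign(newRegions); // routes through balancer.roundRobinAssignment()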
private void assign(int regions, int totalServers,
String message, Map<ServerName, List<HRegionInfo>> bulkPlan)
throws InterruptedException, IOException {
int servers = bulkPlan.size();
if (servers == 1 || (regions < bulkAssignThresholdRegions
&& servers < bulkAssignThresholdServers)) {
// Don't use bulk assignment. Serial assignment can be more efficient on a
// small cluster (especially a mini cluster used in tests), so tests won't time out.
if (LOG.isTraceEnabled()) {
LOG.trace("Not using bulk assignment since we are assigning only " + regions +
" region(s) to " + servers + " server(s)");
}
for (Map.Entry<ServerName, List<HRegionInfo>> plan: bulkPlan.entrySet()) {
if (!assign(plan.getKey(), plan.getValue())) {
for (HRegionInfo region: plan.getValue()) {
if (!regionStates.isRegionOnline(region)) {
invokeAssign(region);
}
}
}
}
} else {
LOG.info("Bulk assigning " + regions + " region(s) across "
+ totalServers + " server(s), " + message);
// Use fixed count thread pool assigning.
BulkAssigner ba = new GeneralBulkAssigner(
this.server, bulkPlan, this, bulkAssignWaitTillAllAssigned);
ba.bulkAssign();
LOG.info("Bulk assigning done");
}
}
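  // The bulk/serial decision above is driven by two thresholds read from the
  // configuration at construction time. The keys and defaults below are the
  // usual ones for this code line; verify them against your HBase version:
  //
  //   hbase.bulk.assignment.threshold.regions  (default 7)
  //   hbase.bulk.assignment.threshold.servers  (default 3)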
/**
* Assigns all user regions, if any exist. Used during cluster startup.
* <p>
* This is a synchronous call and will return once every region has been
* assigned. If anything fails, an exception is thrown and the cluster
* should be shutdown.
* @throws InterruptedException
* @throws IOException
* @throws KeeperException
*/
private void assignAllUserRegions(Set<TableName> disabledOrDisablingOrEnabling)
throws IOException, InterruptedException, KeeperException {
// Skip assignment for regions of tables in DISABLING state because during clean cluster startup
// no RS is alive and regions map also doesn't have any information about the regions.
// See HBASE-6281.
// Scan hbase:meta for all user regions, skipping any disabled tables
Map<HRegionInfo, ServerName> allRegions;
SnapshotOfRegionAssignmentFromMeta snapshotOfRegionAssignment =
new SnapshotOfRegionAssignmentFromMeta(catalogTracker, disabledOrDisablingOrEnabling, true);
snapshotOfRegionAssignment.initialize();
allRegions = snapshotOfRegionAssignment.getRegionToRegionServerMap();
if (allRegions == null || allRegions.isEmpty()) {
return;
}
// Determine what type of assignment to do on startup
boolean retainAssignment = server.getConfiguration().
getBoolean("hbase.master.startup.retainassign", true);
if (retainAssignment) {
assign(allRegions);
} else {
List<HRegionInfo> regions = new ArrayList<HRegionInfo>(allRegions.keySet());
assign(regions);
}
for (HRegionInfo hri : allRegions.keySet()) {
TableName tableName = hri.getTable();
if (!zkTable.isEnabledTable(tableName)) {
setEnabledTable(tableName);
}
}
}
/**
* Wait until no regions in transition.
* @param timeout How long to wait.
* @return True if no regions are in transition.
* @throws InterruptedException
*/
boolean waitUntilNoRegionsInTransition(final long timeout)
throws InterruptedException {
// Blocks until there are no regions in transition. It is possible that
// there are regions in transition immediately after this returns, but it
// guarantees that if it returns without an exception there was a period of
// time with no regions in transition from the point of view of the
// in-memory state of the Master.
final long endTime = System.currentTimeMillis() + timeout;
while (!this.server.isStopped() && regionStates.isRegionsInTransition()
&& endTime > System.currentTimeMillis()) {
regionStates.waitForUpdate(100);
}
return !regionStates.isRegionsInTransition();
}
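  // Illustrative usage (sketch): quiescing the cluster before a maintenance
  // action, with a one-minute bound:
  //
  //   if (!am.waitUntilNoRegionsInTransition(60000)) {
  //     LOG.warn("Regions still in transition after 60s");
  //   }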
/**
* Rebuild the list of user regions and assignment information.
* <p>
* Returns a map of servers that are not found to be online and the regions
* they were hosting.
* @return map of servers not online to their assigned regions, as stored
* in META
* @throws IOException
*/
Map<ServerName, List<HRegionInfo>> rebuildUserRegions() throws IOException, KeeperException {
Set<TableName> enablingTables = ZKTable.getEnablingTables(watcher);
Set<TableName> disabledOrEnablingTables = ZKTable.getDisabledTables(watcher);
disabledOrEnablingTables.addAll(enablingTables);
Set<TableName> disabledOrDisablingOrEnabling = ZKTable.getDisablingTables(watcher);
disabledOrDisablingOrEnabling.addAll(disabledOrEnablingTables);
// Region assignment from META
List<Result> results = MetaReader.fullScan(this.catalogTracker);
// Get any new but slow-to-check-in region servers that joined the cluster
Set<ServerName> onlineServers = serverManager.getOnlineServers().keySet();
// Map of offline servers and their regions to be returned
Map<ServerName, List<HRegionInfo>> offlineServers =
new TreeMap<ServerName, List<HRegionInfo>>();
// Iterate regions in META
for (Result result : results) {
HRegionInfo regionInfo = HRegionInfo.getHRegionInfo(result);
if (regionInfo == null) continue;
State state = RegionStateStore.getRegionState(result);
ServerName regionLocation = RegionStateStore.getRegionServer(result);
regionStates.createRegionState(regionInfo, state, regionLocation);
if (!regionStates.isRegionInState(regionInfo, State.OPEN)) {
// Region is not open (either offline or in transition), skip
continue;
}
TableName tableName = regionInfo.getTable();
if (!onlineServers.contains(regionLocation)) {
// Region is located on a server that isn't online
List<HRegionInfo> offlineRegions = offlineServers.get(regionLocation);
if (offlineRegions == null) {
offlineRegions = new ArrayList<HRegionInfo>(1);
offlineServers.put(regionLocation, offlineRegions);
}
if (useZKForAssignment) {
regionStates.regionOffline(regionInfo);
}
offlineRegions.add(regionInfo);
} else if (!disabledOrEnablingTables.contains(tableName)) {
// Region is being served and on an active server
// add only if region not in disabled or enabling table
regionStates.updateRegionState(regionInfo, State.OPEN, regionLocation);
regionStates.regionOnline(regionInfo, regionLocation);
balancer.regionOnline(regionInfo, regionLocation);
} else if (useZKForAssignment) {
regionStates.regionOffline(regionInfo);
}
// need to enable the table if not disabled or disabling or enabling
// this will be used in rolling restarts
if (!disabledOrDisablingOrEnabling.contains(tableName)
&& !getZKTable().isEnabledTable(tableName)) {
setEnabledTable(tableName);
}
}
return offlineServers;
}
/**
* Recover the tables that were not fully moved to DISABLED state. These
* tables are in DISABLING state when the master restarted/switched.
*
* @throws KeeperException
* @throws TableNotFoundException
* @throws IOException
*/
private void recoverTableInDisablingState()
throws KeeperException, TableNotFoundException, IOException {
Set<TableName> disablingTables = ZKTable.getDisablingTables(watcher);
if (disablingTables.size() != 0) {
for (TableName tableName : disablingTables) {
// Recover by calling DisableTableHandler
LOG.info("The table " + tableName
+ " is in DISABLING state. Hence recovering by moving the table"
+ " to DISABLED state.");
new DisableTableHandler(this.server, tableName, catalogTracker,
this, tableLockManager, true).prepare().process();
}
}
}
/**
* Recover the tables that are not fully moved to ENABLED state. These tables
* are in ENABLING state when the master restarted/switched
*
* @throws KeeperException
* @throws org.apache.hadoop.hbase.TableNotFoundException
* @throws IOException
*/
private void recoverTableInEnablingState()
throws KeeperException, TableNotFoundException, IOException {
Set<TableName> enablingTables = ZKTable.getEnablingTables(watcher);
if (enablingTables.size() != 0) {
for (TableName tableName : enablingTables) {
// Recover by calling EnableTableHandler
LOG.info("The table " + tableName
+ " is in ENABLING state. Hence recovering by moving the table"
+ " to ENABLED state.");
// enableTable in sync way during master startup,
// no need to invoke coprocessor
EnableTableHandler eth = new EnableTableHandler(this.server, tableName,
catalogTracker, this, tableLockManager, true);
try {
eth.prepare();
} catch (TableNotFoundException e) {
LOG.warn("Table " + tableName + " not found in hbase:meta to recover.");
continue;
}
eth.process();
}
}
}
/**
* Processes list of dead servers from result of hbase:meta scan and regions in RIT
* <p>
* This is used for failover to recover the lost regions that belonged to
* RegionServers which failed while there was no active master or regions
* that were in RIT.
* <p>
* @param deadServers
* The list of dead servers which failed while there was no active
* master. Can be null.
* @throws IOException
* @throws KeeperException
*/
private void processDeadServersAndRecoverLostRegions(
Map<ServerName, List<HRegionInfo>> deadServers)
throws IOException, KeeperException {
if (deadServers != null) {
for (Map.Entry<ServerName, List<HRegionInfo>> server: deadServers.entrySet()) {
ServerName serverName = server.getKey();
// We need to keep such info even if the server is known dead
regionStates.setLastRegionServerOfRegions(serverName, server.getValue());
if (!serverManager.isServerDead(serverName)) {
serverManager.expireServer(serverName); // Let SSH do region re-assign
}
}
}
List<String> nodes = useZKForAssignment ?
ZKUtil.listChildrenAndWatchForNewChildren(watcher, watcher.assignmentZNode)
: ZKUtil.listChildrenNoWatch(watcher, watcher.assignmentZNode);
if (nodes != null && !nodes.isEmpty()) {
for (String encodedRegionName : nodes) {
processRegionInTransition(encodedRegionName, null);
}
} else if (!useZKForAssignment) {
// We need to send RPC call again for PENDING_OPEN/PENDING_CLOSE regions
// in case the RPC call is not sent out yet before the master was shut down
// since we update the state before we send the RPC call. We can't update
// the state after the RPC call. Otherwise, we don't know what's happened
// to the region if the master dies right after the RPC call is out.
Map<String, RegionState> rits = regionStates.getRegionsInTransition();
for (RegionState regionState: rits.values()) {
if (!serverManager.isServerOnline(regionState.getServerName())) {
continue; // SSH will handle it
}
State state = regionState.getState();
LOG.info("Processing " + regionState);
switch (state) {
case CLOSED:
invokeAssign(regionState.getRegion());
break;
case PENDING_OPEN:
retrySendRegionOpen(regionState);
break;
case PENDING_CLOSE:
retrySendRegionClose(regionState);
break;
default:
// No process for other states
}
}
}
}
/**
* At master failover, for pending_open region, make sure
* sendRegionOpen RPC call is sent to the target regionserver
*/
private void retrySendRegionOpen(final RegionState regionState) {
this.executorService.submit(
new EventHandler(server, EventType.M_MASTER_RECOVERY) {
@Override
public void process() throws IOException {
HRegionInfo hri = regionState.getRegion();
ServerName serverName = regionState.getServerName();
ReentrantLock lock = locker.acquireLock(hri.getEncodedName());
try {
while (serverManager.isServerOnline(serverName)
&& !server.isStopped() && !server.isAborted()) {
try {
List<ServerName> favoredNodes = ServerName.EMPTY_SERVER_LIST;
if (shouldAssignRegionsWithFavoredNodes) {
favoredNodes = ((FavoredNodeLoadBalancer)balancer).getFavoredNodes(hri);
}
RegionOpeningState regionOpenState = serverManager.sendRegionOpen(
serverName, hri, -1, favoredNodes);
if (regionOpenState == RegionOpeningState.FAILED_OPENING) {
// Failed opening this region, this means the target server didn't get
// the original region open RPC, so re-assign it with a new plan
LOG.debug("Got failed_opening in retry sendRegionOpen for "
+ regionState + ", re-assign it");
invokeAssign(hri, true);
}
return; // Done.
} catch (Throwable t) {
if (t instanceof RemoteException) {
t = ((RemoteException) t).unwrapRemoteException();
}
// In case SocketTimeoutException/FailedServerException, we will retry
if (t instanceof java.net.SocketTimeoutException
|| t instanceof FailedServerException) {
Threads.sleep(100);
continue;
}
// For other exceptions, re-assign it
LOG.debug("Got exception in retry sendRegionOpen for "
+ regionState + ", re-assign it", t);
invokeAssign(hri);
return; // Done.
}
}
} finally {
lock.unlock();
}
}
});
}
/**
* At master failover, for pending_close region, make sure
* sendRegionClose RPC call is sent to the target regionserver
*/
private void retrySendRegionClose(final RegionState regionState) {
this.executorService.submit(
new EventHandler(server, EventType.M_MASTER_RECOVERY) {
@Override
public void process() throws IOException {
HRegionInfo hri = regionState.getRegion();
ServerName serverName = regionState.getServerName();
ReentrantLock lock = locker.acquireLock(hri.getEncodedName());
try {
while (serverManager.isServerOnline(serverName)
&& !server.isStopped() && !server.isAborted()) {
try {
if (!serverManager.sendRegionClose(serverName, hri, -1, null, false)) {
// This means the region is still on the target server
LOG.debug("Got false in retry sendRegionClose for "
+ regionState + ", re-close it");
invokeUnAssign(hri);
}
return; // Done.
} catch (Throwable t) {
if (t instanceof RemoteException) {
t = ((RemoteException) t).unwrapRemoteException();
}
// In case SocketTimeoutException/FailedServerException, we will retry
if (t instanceof java.net.SocketTimeoutException
|| t instanceof FailedServerException) {
Threads.sleep(100);
continue;
}
if (!(t instanceof NotServingRegionException
|| t instanceof RegionAlreadyInTransitionException)) {
// NotServingRegionException/RegionAlreadyInTransitionException
// means the target server got the original region close request.
// For other exceptions, re-close it
LOG.debug("Got exception in retry sendRegionClose for "
+ regionState + ", re-close it", t);
invokeUnAssign(hri);
}
return; // Done.
}
}
} finally {
lock.unlock();
}
}
});
}
/**
* Set Regions in transitions metrics.
* This takes an iterator on the RegionInTransition map (CLSM), and is not synchronized.
* This iterator is not fail-fast, which may lead to stale reads; but that's better than
* creating a copy of the map for metrics computation, as this method is invoked
* at a frequent interval.
*/
public void updateRegionsInTransitionMetrics() {
long currentTime = System.currentTimeMillis();
int totalRITs = 0;
int totalRITsOverThreshold = 0;
long oldestRITTime = 0;
int ritThreshold = this.server.getConfiguration().
getInt(HConstants.METRICS_RIT_STUCK_WARNING_THRESHOLD, 60000);
for (RegionState state: regionStates.getRegionsInTransition().values()) {
totalRITs++;
long ritTime = currentTime - state.getStamp();
if (ritTime > ritThreshold) { // more than the threshold
totalRITsOverThreshold++;
}
if (oldestRITTime < ritTime) {
oldestRITTime = ritTime;
}
}
if (this.metricsAssignmentManager != null) {
this.metricsAssignmentManager.updateRITOldestAge(oldestRITTime);
this.metricsAssignmentManager.updateRITCount(totalRITs);
this.metricsAssignmentManager.updateRITCountOverThreshold(totalRITsOverThreshold);
}
}
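  // The "stuck" threshold above comes from the configuration; the constant
  // HConstants.METRICS_RIT_STUCK_WARNING_THRESHOLD typically maps to the key
  // "hbase.metrics.rit.stuck.warning.threshold" (default 60000 ms) -- verify
  // against your HBase version.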
/**
* @param region Region whose plan we are to clear.
*/
void clearRegionPlan(final HRegionInfo region) {
synchronized (this.regionPlans) {
this.regionPlans.remove(region.getEncodedName());
}
}
/**
* Wait on region to clear regions-in-transition.
* @param hri Region to wait on.
* @throws IOException
*/
public void waitOnRegionToClearRegionsInTransition(final HRegionInfo hri)
throws IOException, InterruptedException {
waitOnRegionToClearRegionsInTransition(hri, -1L);
}
/**
* Wait on region to clear regions-in-transition or time out
* @param hri
* @param timeOut Milliseconds to wait for current region to be out of transition state.
* @return True when a region clears regions-in-transition before timeout otherwise false
* @throws InterruptedException
*/
public boolean waitOnRegionToClearRegionsInTransition(final HRegionInfo hri, long timeOut)
throws InterruptedException {
if (!regionStates.isRegionInTransition(hri)) return true;
long end = (timeOut <= 0) ? Long.MAX_VALUE : EnvironmentEdgeManager.currentTimeMillis()
+ timeOut;
// There is already a timeout monitor on regions in transition so I
// should not have to have one here too?
LOG.info("Waiting for " + hri.getEncodedName() +
" to leave regions-in-transition, timeOut=" + timeOut + " ms.");
while (!this.server.isStopped() && regionStates.isRegionInTransition(hri)) {
regionStates.waitForUpdate(100);
if (EnvironmentEdgeManager.currentTimeMillis() > end) {
LOG.info("Timed out on waiting for " + hri.getEncodedName() + " to be assigned.");
return false;
}
}
if (this.server.isStopped()) {
LOG.info("Giving up wait on regions in transition because stoppable.isStopped is set");
return false;
}
return true;
}
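  // Illustrative usage (sketch): bounded wait for a single region, e.g.
  // before moving it:
  //
  //   if (!am.waitOnRegionToClearRegionsInTransition(hri, 30000)) {
  //     // still in transition (or the master stopped); caller decides what to do
  //   }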
/**
* Update timers for all regions in transition going against the server in the
* serversInUpdatingTimer.
*/
public class TimerUpdater extends Chore {
public TimerUpdater(final int period, final Stoppable stopper) {
super("AssignmentTimerUpdater", period, stopper);
}
@Override
protected void chore() {
Preconditions.checkState(tomActivated);
ServerName serverToUpdateTimer = null;
while (!serversInUpdatingTimer.isEmpty() && !stopper.isStopped()) {
if (serverToUpdateTimer == null) {
serverToUpdateTimer = serversInUpdatingTimer.first();
} else {
serverToUpdateTimer = serversInUpdatingTimer
.higher(serverToUpdateTimer);
}
if (serverToUpdateTimer == null) {
break;
}
updateTimers(serverToUpdateTimer);
serversInUpdatingTimer.remove(serverToUpdateTimer);
}
}
}
/**
* Monitor to check for time outs on region transition operations
*/
public class TimeoutMonitor extends Chore {
private boolean allRegionServersOffline = false;
private ServerManager serverManager;
private final int timeout;
/**
* Creates a periodic monitor to check for time outs on region transition
* operations. This will deal with retries if for some reason something
* doesn't happen within the specified timeout.
* @param period
* @param stopper When {@link Stoppable#isStopped()} is true, this thread will
* cleanup and exit cleanly.
* @param timeout
*/
public TimeoutMonitor(final int period, final Stoppable stopper,
ServerManager serverManager,
final int timeout) {
super("AssignmentTimeoutMonitor", period, stopper);
this.timeout = timeout;
this.serverManager = serverManager;
}
private synchronized void setAllRegionServersOffline(
boolean allRegionServersOffline) {
this.allRegionServersOffline = allRegionServersOffline;
}
@Override
protected void chore() {
Preconditions.checkState(tomActivated);
boolean noRSAvailable = this.serverManager.createDestinationServersList().isEmpty();
// Iterate all regions in transition checking for time outs
long now = System.currentTimeMillis();
// no lock concurrent access ok: we will be working on a copy, and it's java-valid to do
// a copy while another thread is adding/removing items
for (String regionName : regionStates.getRegionsInTransition().keySet()) {
RegionState regionState = regionStates.getRegionTransitionState(regionName);
if (regionState == null) continue;
if (regionState.getStamp() + timeout <= now) {
// decide on action upon timeout
actOnTimeOut(regionState);
} else if (this.allRegionServersOffline && !noRSAvailable) {
RegionPlan existingPlan = regionPlans.get(regionName);
if (existingPlan == null
|| !this.serverManager.isServerOnline(existingPlan
.getDestination())) {
// if some RSs just came back online, we can start the assignment
// right away
actOnTimeOut(regionState);
}
}
}
setAllRegionServersOffline(noRSAvailable);
}
private void actOnTimeOut(RegionState regionState) {
HRegionInfo regionInfo = regionState.getRegion();
LOG.info("Regions in transition timed out: " + regionState);
// Expired! Do a retry.
switch (regionState.getState()) {
case CLOSED:
LOG.info("Region " + regionInfo.getEncodedName()
+ " has been CLOSED for too long, waiting on queued "
+ "ClosedRegionHandler to run or server shutdown");
// Update our timestamp.
regionState.updateTimestampToNow();
break;
case OFFLINE:
LOG.info("Region has been OFFLINE for too long, " + "reassigning "
+ regionInfo.getRegionNameAsString() + " to a random server");
invokeAssign(regionInfo);
break;
case PENDING_OPEN:
LOG.info("Region has been PENDING_OPEN for too "
+ "long, reassigning region=" + regionInfo.getRegionNameAsString());
invokeAssign(regionInfo);
break;
case OPENING:
processOpeningState(regionInfo);
break;
case OPEN:
LOG.error("Region has been OPEN for too long, " +
"we don't know where region was opened so can't do anything");
regionState.updateTimestampToNow();
break;
case PENDING_CLOSE:
LOG.info("Region has been PENDING_CLOSE for too "
+ "long, running forced unassign again on region="
+ regionInfo.getRegionNameAsString());
invokeUnassign(regionInfo);
break;
case CLOSING:
LOG.info("Region has been CLOSING for too " +
"long, this should eventually complete or the server will " +
"expire, send RPC again");
invokeUnassign(regionInfo);
break;
case SPLIT:
case SPLITTING:
case FAILED_OPEN:
case FAILED_CLOSE:
case MERGING:
break;
default:
throw new IllegalStateException("Received event is not valid.");
}
}
}
private void processOpeningState(HRegionInfo regionInfo) {
LOG.info("Region has been OPENING for too long, reassigning region="
+ regionInfo.getRegionNameAsString());
// Should have a ZK node in OPENING state
try {
String node = ZKAssign.getNodeName(watcher, regionInfo.getEncodedName());
Stat stat = new Stat();
byte [] data = ZKAssign.getDataNoWatch(watcher, node, stat);
if (data == null) {
LOG.warn("Data is null, node " + node + " no longer exists");
return;
}
RegionTransition rt = RegionTransition.parseFrom(data);
EventType et = rt.getEventType();
if (et == EventType.RS_ZK_REGION_OPENED) {
LOG.debug("Region has transitioned to OPENED, allowing "
+ "watched event handlers to process");
return;
} else if (et != EventType.RS_ZK_REGION_OPENING && et != EventType.RS_ZK_REGION_FAILED_OPEN ) {
LOG.warn("While timing out a region, found ZK node in unexpected state: " + et);
return;
}
invokeAssign(regionInfo);
} catch (KeeperException ke) {
LOG.error("Unexpected ZK exception timing out CLOSING region", ke);
} catch (DeserializationException e) {
LOG.error("Unexpected exception parsing CLOSING region", e);
}
}
void invokeAssign(HRegionInfo regionInfo) {
invokeAssign(regionInfo, true);
}
void invokeAssign(HRegionInfo regionInfo, boolean newPlan) {
threadPoolExecutorService.submit(new AssignCallable(this, regionInfo, newPlan));
}
void invokeUnAssign(HRegionInfo regionInfo) {
threadPoolExecutorService.submit(new UnAssignCallable(this, regionInfo));
}
private void invokeUnassign(HRegionInfo regionInfo) {
threadPoolExecutorService.submit(new UnAssignCallable(this, regionInfo));
}
public boolean isCarryingMeta(ServerName serverName) {
return isCarryingRegion(serverName, HRegionInfo.FIRST_META_REGIONINFO);
}
/**
* Check if the shutdown server carries the specific region.
* We have a bunch of places that store the region location, and
* those values aren't consistent: there is a delay of notification.
* The location from the zookeeper unassigned node has the most recent data,
* but the node could be deleted after the region is opened by the AM.
* The AM's info could be old when the OpenedRegionHandler
* processing hasn't finished yet when the server shutdown occurs.
* @return whether the serverName currently hosts the region
*/
private boolean isCarryingRegion(ServerName serverName, HRegionInfo hri) {
RegionTransition rt = null;
try {
byte [] data = ZKAssign.getData(watcher, hri.getEncodedName());
// This call can legitimately return null
rt = data == null? null: RegionTransition.parseFrom(data);
} catch (KeeperException e) {
server.abort("Exception reading unassigned node for region=" + hri.getEncodedName(), e);
} catch (DeserializationException e) {
server.abort("Exception parsing unassigned node for region=" + hri.getEncodedName(), e);
}
ServerName addressFromZK = rt != null? rt.getServerName(): null;
if (addressFromZK != null) {
// if we get something from ZK, we will use the data
boolean matchZK = addressFromZK.equals(serverName);
LOG.debug("Checking region=" + hri.getRegionNameAsString() + ", zk server=" + addressFromZK +
" current=" + serverName + ", matches=" + matchZK);
return matchZK;
}
ServerName addressFromAM = regionStates.getRegionServerOfRegion(hri);
boolean matchAM = (addressFromAM != null &&
addressFromAM.equals(serverName));
LOG.debug("based on AM, current region=" + hri.getRegionNameAsString() +
" is on server=" + (addressFromAM != null ? addressFromAM : "null") +
" server being checked: " + serverName);
return matchAM;
}
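  // Illustrative usage (sketch): ServerShutdownHandler-style check, assuming
  // `sn` is the dead server being processed -- recover hbase:meta before
  // user regions:
  //
  //   if (am.isCarryingMeta(sn)) {
  //     // reassign hbase:meta first
  //   }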
/**
* Process shutdown server removing any assignments.
* @param sn Server that went down.
* @return list of regions in transition on this server
*/
public List<HRegionInfo> processServerShutdown(final ServerName sn) {
// Clean out any existing assignment plans for this server
synchronized (this.regionPlans) {
for (Iterator <Map.Entry<String, RegionPlan>> i =
this.regionPlans.entrySet().iterator(); i.hasNext();) {
Map.Entry<String, RegionPlan> e = i.next();
ServerName otherSn = e.getValue().getDestination();
// The name will be null if the region is planned for a random assign.
if (otherSn != null && otherSn.equals(sn)) {
// Use iterator's remove else we'll get CME
i.remove();
}
}
}
List<HRegionInfo> regions = regionStates.serverOffline(watcher, sn);
for (Iterator<HRegionInfo> it = regions.iterator(); it.hasNext(); ) {
HRegionInfo hri = it.next();
String encodedName = hri.getEncodedName();
// We need a lock on the region as we could update it
Lock lock = locker.acquireLock(encodedName);
try {
RegionState regionState =
regionStates.getRegionTransitionState(encodedName);
if (regionState == null
|| (regionState.getServerName() != null && !regionState.isOnServer(sn))
|| !(regionState.isFailedClose() || regionState.isOffline()
|| regionState.isPendingOpenOrOpening())) {
LOG.info("Skip " + regionState + " since it is not opening/failed_close"
+ " on the dead server any more: " + sn);
it.remove();
} else {
try {
// Delete the ZNode if exists
ZKAssign.deleteNodeFailSilent(watcher, hri);
} catch (KeeperException ke) {
server.abort("Unexpected ZK exception deleting node " + hri, ke);
}
if (zkTable.isDisablingOrDisabledTable(hri.getTable())) {
regionStates.regionOffline(hri);
it.remove();
continue;
}
// Mark the region offline and assign it again by SSH
regionStates.updateRegionState(hri, State.OFFLINE);
}
} finally {
lock.unlock();
}
}
return regions;
}
/**
* @param plan Plan to execute.
*/
public void balance(final RegionPlan plan) {
HRegionInfo hri = plan.getRegionInfo();
TableName tableName = hri.getTable();
if (zkTable.isDisablingOrDisabledTable(tableName)) {
LOG.info("Ignored moving region of disabling/disabled table "
+ tableName);
return;
}
// Move the region only if it's assigned
String encodedName = hri.getEncodedName();
ReentrantLock lock = locker.acquireLock(encodedName);
try {
if (!regionStates.isRegionOnline(hri)) {
RegionState state = regionStates.getRegionState(encodedName);
LOG.info("Ignored moving region not assigned: " + hri + ", "
+ (state == null ? "not in region states" : state));
return;
}
synchronized (this.regionPlans) {
this.regionPlans.put(plan.getRegionName(), plan);
}
unassign(hri, false, plan.getDestination());
} finally {
lock.unlock();
}
}
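  // Illustrative usage (sketch): moving a region the way the balancer does,
  // assuming `src` currently hosts `hri` and `dest` is a live server:
  //
  //   am.balance(new RegionPlan(hri, src, dest));
  //   // no-op if the table is disabling/disabled or the region is not online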
public void stop() {
shutdown(); // Stop executor service, etc
if (tomActivated) {
this.timeoutMonitor.interrupt();
this.timerUpdater.interrupt();
}
}
/**
* Shutdown the threadpool executor service
*/
public void shutdown() {
// It's an immediate shutdown, so we're clearing the remaining tasks.
synchronized (zkEventWorkerWaitingList) {
zkEventWorkerWaitingList.clear();
}
threadPoolExecutorService.shutdownNow();
zkEventWorkers.shutdownNow();
regionStateStore.stop();
}
protected void setEnabledTable(TableName tableName) {
try {
this.zkTable.setEnabledTable(tableName);
} catch (KeeperException e) {
// here we can abort as it is the start up flow
String errorMsg = "Unable to ensure that the table " + tableName
+ " will be" + " enabled because of a ZooKeeper issue";
LOG.error(errorMsg);
this.server.abort(errorMsg, e);
}
}
/**
* Set region as OFFLINED up in zookeeper asynchronously.
* @param state
* @return True if we succeeded, false otherwise (State was incorrect or failed
* updating zk).
*/
private boolean asyncSetOfflineInZooKeeper(final RegionState state,
final AsyncCallback.StringCallback cb, final ServerName destination) {
if (!state.isClosed() && !state.isOffline()) {
this.server.abort("Unexpected state trying to OFFLINE; " + state,
new IllegalStateException());
return false;
}
regionStates.updateRegionState(state.getRegion(), State.OFFLINE);
try {
ZKAssign.asyncCreateNodeOffline(watcher, state.getRegion(),
destination, cb, state);
} catch (KeeperException e) {
if (e instanceof NodeExistsException) {
LOG.warn("Node for " + state.getRegion() + " already exists");
} else {
server.abort("Unexpected ZK exception creating/setting node OFFLINE", e);
}
return false;
}
return true;
}
private boolean deleteNodeInStates(String encodedName,
String desc, ServerName sn, EventType... types) {
try {
for (EventType et: types) {
if (ZKAssign.deleteNode(watcher, encodedName, et, sn)) {
return true;
}
}
LOG.info("Failed to delete the " + desc + " node for "
+ encodedName + ". The node type may not match");
} catch (NoNodeException e) {
if (LOG.isDebugEnabled()) {
LOG.debug("The " + desc + " node for " + encodedName + " already deleted");
}
} catch (KeeperException ke) {
server.abort("Unexpected ZK exception deleting " + desc
+ " node for the region " + encodedName, ke);
}
return false;
}
private void deleteMergingNode(String encodedName, ServerName sn) {
deleteNodeInStates(encodedName, "merging", sn, EventType.RS_ZK_REGION_MERGING,
EventType.RS_ZK_REQUEST_REGION_MERGE, EventType.RS_ZK_REGION_MERGED);
}
private void deleteSplittingNode(String encodedName, ServerName sn) {
deleteNodeInStates(encodedName, "splitting", sn, EventType.RS_ZK_REGION_SPLITTING,
EventType.RS_ZK_REQUEST_REGION_SPLIT, EventType.RS_ZK_REGION_SPLIT);
}
private void onRegionFailedOpen(
final HRegionInfo hri, final ServerName sn) {
String encodedName = hri.getEncodedName();
AtomicInteger failedOpenCount = failedOpenTracker.get(encodedName);
if (failedOpenCount == null) {
failedOpenCount = new AtomicInteger();
// No need to use putIfAbsent, or extra synchronization since
// this whole handleRegion block is locked on the encoded region
// name, and failedOpenTracker is updated only in this block
failedOpenTracker.put(encodedName, failedOpenCount);
}
if (failedOpenCount.incrementAndGet() >= maximumAttempts) {
regionStates.updateRegionState(hri, State.FAILED_OPEN);
// remove the tracking info to save memory, also reset
// the count for next open initiative
failedOpenTracker.remove(encodedName);
} else {
// Handle this the same as if it were opened and then closed.
RegionState regionState = regionStates.updateRegionState(hri, State.CLOSED);
if (regionState != null) {
// When there is more than one region server, a new RS is selected as the
// destination and the region plan is updated accordingly. (HBASE-5546)
Set<TableName> disablingOrDisabled = null;
try {
disablingOrDisabled = ZKTable.getDisablingTables(watcher);
disablingOrDisabled.addAll(ZKTable.getDisabledTables(watcher));
} catch (KeeperException e) {
server.abort("Cannot retrieve info about disabling or disabled tables ", e);
}
if (disablingOrDisabled.contains(hri.getTable())) {
offlineDisabledRegion(hri);
return;
}
// ZK Node is in CLOSED state, assign it.
regionStates.updateRegionState(hri, RegionState.State.CLOSED);
// This below has to do w/ online enable/disable of a table
removeClosedRegion(hri);
try {
getRegionPlan(hri, sn, true);
} catch (HBaseIOException e) {
LOG.warn("Failed to get region plan", e);
}
invokeAssign(hri, false);
}
}
}
private void onRegionOpen(
final HRegionInfo hri, final ServerName sn, long openSeqNum) {
regionOnline(hri, sn, openSeqNum);
if (useZKForAssignment) {
try {
// Delete the ZNode if exists
ZKAssign.deleteNodeFailSilent(watcher, hri);
} catch (KeeperException ke) {
server.abort("Unexpected ZK exception deleting node " + hri, ke);
}
}
// reset the count, if any
failedOpenTracker.remove(hri.getEncodedName());
if (isTableDisabledOrDisabling(hri.getTable())) {
invokeUnAssign(hri);
}
}
private void onRegionClosed(final HRegionInfo hri) {
if (isTableDisabledOrDisabling(hri.getTable())) {
offlineDisabledRegion(hri);
return;
}
regionStates.updateRegionState(hri, RegionState.State.CLOSED);
// This below has to do w/ online enable/disable of a table
removeClosedRegion(hri);
invokeAssign(hri, false);
}
private String onRegionSplit(ServerName sn, TransitionCode code,
HRegionInfo p, HRegionInfo a, HRegionInfo b) {
RegionState rs_p = regionStates.getRegionState(p);
RegionState rs_a = regionStates.getRegionState(a);
RegionState rs_b = regionStates.getRegionState(b);
if (!(rs_p.isOpenOrSplittingOnServer(sn)
&& (rs_a == null || rs_a.isOpenOrSplittingNewOnServer(sn))
&& (rs_b == null || rs_b.isOpenOrSplittingNewOnServer(sn)))) {
return "Not in state good for split";
}
regionStates.updateRegionState(a, State.SPLITTING_NEW, sn);
regionStates.updateRegionState(b, State.SPLITTING_NEW, sn);
regionStates.updateRegionState(p, State.SPLITTING);
if (code == TransitionCode.SPLIT) {
if (TEST_SKIP_SPLIT_HANDLING) {
return "Skipping split message, TEST_SKIP_SPLIT_HANDLING is set";
}
regionOffline(p, State.SPLIT);
regionOnline(a, sn, 1);
regionOnline(b, sn, 1);
// User could disable the table before master knows the new region.
if (isTableDisabledOrDisabling(p.getTable())) {
invokeUnAssign(a);
invokeUnAssign(b);
}
} else if (code == TransitionCode.SPLIT_PONR) {
try {
regionStateStore.splitRegion(p, a, b, sn);
} catch (IOException ioe) {
LOG.info("Failed to record split region " + p.getShortNameToLog());
return "Failed to record the splitting in meta";
}
} else if (code == TransitionCode.SPLIT_REVERTED) {
regionOnline(p, sn);
regionOffline(a);
regionOffline(b);
if (isTableDisabledOrDisabling(p.getTable())) {
invokeUnAssign(p);
}
}
return null;
}
private boolean isTableDisabledOrDisabling(TableName t) {
Set<TableName> disablingOrDisabled = null;
try {
disablingOrDisabled = ZKTable.getDisablingTables(watcher);
disablingOrDisabled.addAll(ZKTable.getDisabledTables(watcher));
} catch (KeeperException e) {
server.abort("Cannot retrieve info about disabling or disabled tables ", e);
}
return disablingOrDisabled.contains(t);
}
private String onRegionMerge(ServerName sn, TransitionCode code,
HRegionInfo p, HRegionInfo a, HRegionInfo b) {
RegionState rs_p = regionStates.getRegionState(p);
RegionState rs_a = regionStates.getRegionState(a);
RegionState rs_b = regionStates.getRegionState(b);
if (!(rs_a.isOpenOrMergingOnServer(sn) && rs_b.isOpenOrMergingOnServer(sn)
&& (rs_p == null || rs_p.isOpenOrMergingNewOnServer(sn)))) {
return "Not in state good for merge";
}
regionStates.updateRegionState(a, State.MERGING);
regionStates.updateRegionState(b, State.MERGING);
regionStates.updateRegionState(p, State.MERGING_NEW, sn);
String encodedName = p.getEncodedName();
if (code == TransitionCode.READY_TO_MERGE) {
mergingRegions.put(encodedName,
new PairOfSameType<HRegionInfo>(a, b));
} else if (code == TransitionCode.MERGED) {
mergingRegions.remove(encodedName);
regionOffline(a, State.MERGED);
regionOffline(b, State.MERGED);
regionOnline(p, sn, 1);
// User could disable the table before master knows the new region.
if (isTableDisabledOrDisabling(p.getTable())) {
invokeUnAssign(p);
}
} else if (code == TransitionCode.MERGE_PONR) {
try {
regionStateStore.mergeRegions(p, a, b, sn);
} catch (IOException ioe) {
LOG.info("Failed to record merged region " + p.getShortNameToLog());
return "Failed to record the merging in meta";
}
} else {
mergingRegions.remove(encodedName);
regionOnline(a, sn);
regionOnline(b, sn);
regionOffline(p);
if (isTableDisabledOrDisabling(p.getTable())) {
invokeUnAssign(a);
invokeUnAssign(b);
}
}
return null;
}
/**
* A helper to handle region merging transition event.
* It transitions merging regions to MERGING state.
*/
private boolean handleRegionMerging(final RegionTransition rt, final String encodedName,
final String prettyPrintedRegionName, final ServerName sn) {
if (!serverManager.isServerOnline(sn)) {
LOG.warn("Dropped merging! ServerName=" + sn + " unknown.");
return false;
}
byte [] payloadOfMerging = rt.getPayload();
List<HRegionInfo> mergingRegions;
try {
mergingRegions = HRegionInfo.parseDelimitedFrom(
payloadOfMerging, 0, payloadOfMerging.length);
} catch (IOException e) {
LOG.error("Dropped merging! Failed reading " + rt.getEventType()
+ " payload for " + prettyPrintedRegionName);
return false;
}
assert mergingRegions.size() == 3;
HRegionInfo p = mergingRegions.get(0);
HRegionInfo hri_a = mergingRegions.get(1);
HRegionInfo hri_b = mergingRegions.get(2);
RegionState rs_p = regionStates.getRegionState(p);
RegionState rs_a = regionStates.getRegionState(hri_a);
RegionState rs_b = regionStates.getRegionState(hri_b);
if (!((rs_a == null || rs_a.isOpenOrMergingOnServer(sn))
&& (rs_b == null || rs_b.isOpenOrMergingOnServer(sn))
&& (rs_p == null || rs_p.isOpenOrMergingNewOnServer(sn)))) {
LOG.warn("Dropped merging! Not in state good for MERGING; rs_p="
+ rs_p + ", rs_a=" + rs_a + ", rs_b=" + rs_b);
return false;
}
EventType et = rt.getEventType();
if (et == EventType.RS_ZK_REQUEST_REGION_MERGE) {
try {
if (RegionMergeTransaction.transitionMergingNode(watcher, p,
hri_a, hri_b, sn, -1, EventType.RS_ZK_REQUEST_REGION_MERGE,
EventType.RS_ZK_REGION_MERGING) == -1) {
byte[] data = ZKAssign.getData(watcher, encodedName);
EventType currentType = null;
if (data != null) {
RegionTransition newRt = RegionTransition.parseFrom(data);
currentType = newRt.getEventType();
}
if (currentType == null || (currentType != EventType.RS_ZK_REGION_MERGED
&& currentType != EventType.RS_ZK_REGION_MERGING)) {
LOG.warn("Failed to transition pending_merge node "
+ encodedName + " to merging, it's now " + currentType);
return false;
}
}
} catch (Exception e) {
LOG.warn("Failed to transition pending_merge node "
+ encodedName + " to merging", e);
return false;
}
}
synchronized (regionStates) {
regionStates.updateRegionState(hri_a, State.MERGING);
regionStates.updateRegionState(hri_b, State.MERGING);
regionStates.updateRegionState(p, State.MERGING_NEW, sn);
if (et != EventType.RS_ZK_REGION_MERGED) {
this.mergingRegions.put(encodedName,
new PairOfSameType<HRegionInfo>(hri_a, hri_b));
} else {
this.mergingRegions.remove(encodedName);
regionOffline(hri_a, State.MERGED);
regionOffline(hri_b, State.MERGED);
regionOnline(p, sn);
}
}
if (et == EventType.RS_ZK_REGION_MERGED) {
LOG.debug("Handling MERGED event for " + encodedName + "; deleting node");
// Remove region from ZK
try {
boolean successful = false;
while (!successful) {
// It's possible that the RS tickles in between the reading of the
// znode and the deleting, so it's safe to retry.
successful = ZKAssign.deleteNode(watcher, encodedName,
EventType.RS_ZK_REGION_MERGED, sn);
}
} catch (KeeperException e) {
if (e instanceof NoNodeException) {
String znodePath = ZKUtil.joinZNode(watcher.splitLogZNode, encodedName);
LOG.debug("The znode " + znodePath + " does not exist. May be deleted already.");
} else {
server.abort("Error deleting MERGED node " + encodedName, e);
}
}
LOG.info("Handled MERGED event; merged=" + p.getRegionNameAsString()
+ ", region_a=" + hri_a.getRegionNameAsString() + ", region_b="
+ hri_b.getRegionNameAsString() + ", on " + sn);
// User could disable the table before master knows the new region.
if (zkTable.isDisablingOrDisabledTable(p.getTable())) {
unassign(p);
}
}
return true;
}
/**
* A helper to handle region splitting transition event.
*/
private boolean handleRegionSplitting(final RegionTransition rt, final String encodedName,
final String prettyPrintedRegionName, final ServerName sn) {
if (!serverManager.isServerOnline(sn)) {
LOG.warn("Dropped splitting! ServerName=" + sn + " unknown.");
return false;
}
byte [] payloadOfSplitting = rt.getPayload();
List<HRegionInfo> splittingRegions;
try {
splittingRegions = HRegionInfo.parseDelimitedFrom(
payloadOfSplitting, 0, payloadOfSplitting.length);
} catch (IOException e) {
LOG.error("Dropped splitting! Failed reading " + rt.getEventType()
+ " payload for " + prettyPrintedRegionName);
return false;
}
assert splittingRegions.size() == 2;
HRegionInfo hri_a = splittingRegions.get(0);
HRegionInfo hri_b = splittingRegions.get(1);
RegionState rs_p = regionStates.getRegionState(encodedName);
RegionState rs_a = regionStates.getRegionState(hri_a);
RegionState rs_b = regionStates.getRegionState(hri_b);
if (!((rs_p == null || rs_p.isOpenOrSplittingOnServer(sn))
&& (rs_a == null || rs_a.isOpenOrSplittingNewOnServer(sn))
&& (rs_b == null || rs_b.isOpenOrSplittingNewOnServer(sn)))) {
LOG.warn("Dropped splitting! Not in state good for SPLITTING; rs_p="
+ rs_p + ", rs_a=" + rs_a + ", rs_b=" + rs_b);
return false;
}
if (rs_p == null) {
// Splitting region should be online
rs_p = regionStates.updateRegionState(rt, State.OPEN);
if (rs_p == null) {
LOG.warn("Received splitting for region " + prettyPrintedRegionName
+ " from server " + sn + " but it doesn't exist anymore,"
+ " probably already processed its split");
return false;
}
regionStates.regionOnline(rs_p.getRegion(), sn);
}
HRegionInfo p = rs_p.getRegion();
EventType et = rt.getEventType();
if (et == EventType.RS_ZK_REQUEST_REGION_SPLIT) {
try {
if (SplitTransaction.transitionSplittingNode(watcher, p,
hri_a, hri_b, sn, -1, EventType.RS_ZK_REQUEST_REGION_SPLIT,
EventType.RS_ZK_REGION_SPLITTING) == -1) {
byte[] data = ZKAssign.getData(watcher, encodedName);
EventType currentType = null;
if (data != null) {
RegionTransition newRt = RegionTransition.parseFrom(data);
currentType = newRt.getEventType();
}
if (currentType == null || (currentType != EventType.RS_ZK_REGION_SPLIT
&& currentType != EventType.RS_ZK_REGION_SPLITTING)) {
LOG.warn("Failed to transition pending_split node "
+ encodedName + " to splitting, it's now " + currentType);
return false;
}
}
} catch (Exception e) {
LOG.warn("Failed to transition pending_split node "
+ encodedName + " to splitting", e);
return false;
}
}
synchronized (regionStates) {
regionStates.updateRegionState(hri_a, State.SPLITTING_NEW, sn);
regionStates.updateRegionState(hri_b, State.SPLITTING_NEW, sn);
regionStates.updateRegionState(rt, State.SPLITTING);
// The below is for testing ONLY! We can't do fault injection easily, so
// resort to this kinda ugliness -- St.Ack 02/25/2011.
if (TEST_SKIP_SPLIT_HANDLING) {
LOG.warn("Skipping split message, TEST_SKIP_SPLIT_HANDLING is set");
return true; // return true so that the splitting node stays
}
if (et == EventType.RS_ZK_REGION_SPLIT) {
regionOffline(p, State.SPLIT);
regionOnline(hri_a, sn);
regionOnline(hri_b, sn);
}
}
if (et == EventType.RS_ZK_REGION_SPLIT) {
LOG.debug("Handling SPLIT event for " + encodedName + "; deleting node");
// Remove region from ZK
try {
boolean successful = false;
while (!successful) {
// It's possible that the RS tickles in between the reading of the
// znode and the deleting, so it's safe to retry.
successful = ZKAssign.deleteNode(watcher, encodedName,
EventType.RS_ZK_REGION_SPLIT, sn);
}
} catch (KeeperException e) {
if (e instanceof NoNodeException) {
String znodePath = ZKUtil.joinZNode(watcher.splitLogZNode, encodedName);
LOG.debug("The znode " + znodePath + " does not exist. May be deleted already.");
} else {
server.abort("Error deleting SPLIT node " + encodedName, e);
}
}
LOG.info("Handled SPLIT event; parent=" + p.getRegionNameAsString()
+ ", daughter a=" + hri_a.getRegionNameAsString() + ", daughter b="
+ hri_b.getRegionNameAsString() + ", on " + sn);
// User could disable the table before master knows the new region.
if (zkTable.isDisablingOrDisabledTable(p.getTable())) {
unassign(hri_a);
unassign(hri_b);
}
}
return true;
}
/**
* A region is offline. The new state should be the specified one,
* if not null. If the specified state is null, the new state is Offline.
* The specified state can be Split/Merged/Offline/null only.
*/
private void regionOffline(final HRegionInfo regionInfo, final State state) {
regionStates.regionOffline(regionInfo, state);
removeClosedRegion(regionInfo);
// remove the region plan as well just in case.
clearRegionPlan(regionInfo);
balancer.regionOffline(regionInfo);
// Tell our listeners that a region was closed
sendRegionClosedNotification(regionInfo);
}
private void sendRegionOpenedNotification(final HRegionInfo regionInfo,
final ServerName serverName) {
if (!this.listeners.isEmpty()) {
for (AssignmentListener listener : this.listeners) {
listener.regionOpened(regionInfo, serverName);
}
}
}
private void sendRegionClosedNotification(final HRegionInfo regionInfo) {
if (!this.listeners.isEmpty()) {
for (AssignmentListener listener : this.listeners) {
listener.regionClosed(regionInfo);
}
}
}
/**
* Try to update some region states. If the state machine prevents
* such update, an error message is returned to explain the reason.
*
* It's expected that each transition involves just one region
* for opening/closing, and three regions for splitting/merging.
* These regions should be on the server that requested the change.
*
* Region state machine. Only these transitions
* are expected to be triggered by a region server.
*
* On the state transition:
* (1) Open/Close should be initiated by master
* (a) Master sets the region to pending_open/pending_close
* in memory and hbase:meta after sending the request
* to the region server
* (b) Region server reports back to the master
* after open/close is done (either success/failure)
* (c) If region server has problem to report the status
* to master, it must be because the master is down or some
* temporary network issue. Otherwise, the region server should
* abort since it must be a bug. If the master is not accessible,
* the region server should keep trying until the server is
* stopped or till the status is reported to the (new) master
* (d) If region server dies in the middle of opening/closing
* a region, SSH picks it up and finishes it
* (e) If master dies in the middle, the new master recovers
* the state during initialization from hbase:meta. Region server
* can report any transition that has not been reported to
* the previous active master yet
* (2) Split/merge is initiated by region servers
* (a) To split a region, a region server sends a request
* to master to try to set a region to splitting, together with
* two daughters (to be created) to splitting new. If approved
* by the master, the splitting can then move ahead
* (b) To merge two regions, a region server sends a request to
* master to try to set the new merged region (to be created) to
* merging_new, together with two regions (to be merged) to merging.
* If it is ok with the master, the merge can then move ahead
* (c) Once the splitting/merging is done, the region server
* reports the status back to the master either success/failure.
* (d) Other scenarios should be handled similarly as for
* region open/close
*/
protected String onRegionTransition(final ServerName serverName,
final RegionStateTransition transition) {
TransitionCode code = transition.getTransitionCode();
HRegionInfo hri = HRegionInfo.convert(transition.getRegionInfo(0));
RegionState current = regionStates.getRegionState(hri);
if (LOG.isDebugEnabled()) {
LOG.debug("Got transition " + code + " for "
+ (current != null ? current.toString() : hri.getShortNameToLog())
+ " from " + serverName);
}
String errorMsg = null;
switch (code) {
case OPENED:
if (current != null && current.isOpened() && current.isOnServer(serverName)) {
LOG.info("Region " + hri.getShortNameToLog() + " is already " + current.getState() + " on "
+ serverName);
break;
}
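      // Intentional fall-through: an OPENED report that doesn't match the
      // "already open on this server" fast path is validated and handled
      // together with FAILED_OPEN below.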
case FAILED_OPEN:
if (current == null
|| !current.isPendingOpenOrOpeningOnServer(serverName)) {
errorMsg = hri.getShortNameToLog()
+ " is not pending open on " + serverName;
} else if (code == TransitionCode.FAILED_OPEN) {
onRegionFailedOpen(hri, serverName);
} else {
long openSeqNum = HConstants.NO_SEQNUM;
if (transition.hasOpenSeqNum()) {
openSeqNum = transition.getOpenSeqNum();
}
if (openSeqNum < 0) {
errorMsg = "Newly opened region has invalid open seq num " + openSeqNum;
} else {
onRegionOpen(hri, serverName, openSeqNum);
}
}
break;
case CLOSED:
if (current == null
|| !current.isPendingCloseOrClosingOnServer(serverName)) {
errorMsg = hri.getShortNameToLog()
+ " is not pending close on " + serverName;
} else {
onRegionClosed(hri);
}
break;
case READY_TO_SPLIT:
case SPLIT_PONR:
case SPLIT:
case SPLIT_REVERTED:
errorMsg = onRegionSplit(serverName, code, hri,
HRegionInfo.convert(transition.getRegionInfo(1)),
HRegionInfo.convert(transition.getRegionInfo(2)));
break;
case READY_TO_MERGE:
case MERGE_PONR:
case MERGED:
case MERGE_REVERTED:
errorMsg = onRegionMerge(serverName, code, hri,
HRegionInfo.convert(transition.getRegionInfo(1)),
HRegionInfo.convert(transition.getRegionInfo(2)));
break;
default:
errorMsg = "Unexpected transition code " + code;
}
if (errorMsg != null) {
LOG.error("Failed to transtion region from " + current + " to "
+ code + " by " + serverName + ": " + errorMsg);
}
return errorMsg;
}
/**
* @return Instance of load balancer
*/
public LoadBalancer getBalancer() {
return this.balancer;
}
}
| Jackygq1982/hbase_src | hbase-server/src/main/java/org/apache/hadoop/hbase/master/AssignmentManager.java | Java | apache-2.0 | 167,470 |
package io.dropwizard.jetty;
import com.codahale.metrics.MetricRegistry;
import com.codahale.metrics.jetty9.InstrumentedConnectionFactory;
import com.fasterxml.jackson.databind.ObjectMapper;
import io.dropwizard.configuration.ResourceConfigurationSourceProvider;
import io.dropwizard.configuration.YamlConfigurationFactory;
import io.dropwizard.jackson.DiscoverableSubtypeResolver;
import io.dropwizard.jackson.Jackson;
import io.dropwizard.logging.ConsoleAppenderFactory;
import io.dropwizard.logging.FileAppenderFactory;
import io.dropwizard.logging.SyslogAppenderFactory;
import io.dropwizard.util.DataSize;
import io.dropwizard.util.Duration;
import io.dropwizard.validation.BaseValidator;
import org.assertj.core.api.InstanceOfAssertFactories;
import org.eclipse.jetty.http.CookieCompliance;
import org.eclipse.jetty.http.HttpCompliance;
import org.eclipse.jetty.server.ForwardedRequestCustomizer;
import org.eclipse.jetty.server.HttpConfiguration;
import org.eclipse.jetty.server.HttpConnectionFactory;
import org.eclipse.jetty.server.ProxyConnectionFactory;
import org.eclipse.jetty.server.Server;
import org.eclipse.jetty.server.ServerConnector;
import org.eclipse.jetty.util.thread.QueuedThreadPool;
import org.eclipse.jetty.util.thread.ScheduledExecutorScheduler;
import org.eclipse.jetty.util.thread.ThreadPool;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import javax.validation.Validator;
import java.util.Optional;
import static org.assertj.core.api.Assertions.assertThat;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.verify;
class HttpConnectorFactoryTest {
private final ObjectMapper objectMapper = Jackson.newObjectMapper();
private final Validator validator = BaseValidator.newValidator();
@BeforeEach
void setUp() {
objectMapper.getSubtypeResolver().registerSubtypes(ConsoleAppenderFactory.class,
FileAppenderFactory.class, SyslogAppenderFactory.class, HttpConnectorFactory.class);
}
@Test
void isDiscoverable() {
assertThat(new DiscoverableSubtypeResolver().getDiscoveredSubtypes())
.contains(HttpConnectorFactory.class);
}
@Test
void testParseMinimalConfiguration() throws Exception {
HttpConnectorFactory http =
new YamlConfigurationFactory<>(HttpConnectorFactory.class, validator, objectMapper, "dw")
.build(new ResourceConfigurationSourceProvider(), "yaml/http-connector-minimal.yml");
assertThat(http.getPort()).isEqualTo(8080);
assertThat(http.getBindHost()).isNull();
assertThat(http.isInheritChannel()).isFalse();
assertThat(http.getHeaderCacheSize()).isEqualTo(DataSize.bytes(512));
assertThat(http.getOutputBufferSize()).isEqualTo(DataSize.kibibytes(32));
assertThat(http.getMaxRequestHeaderSize()).isEqualTo(DataSize.kibibytes(8));
assertThat(http.getMaxResponseHeaderSize()).isEqualTo(DataSize.kibibytes(8));
assertThat(http.getInputBufferSize()).isEqualTo(DataSize.kibibytes(8));
assertThat(http.getIdleTimeout()).isEqualTo(Duration.seconds(30));
assertThat(http.getMinBufferPoolSize()).isEqualTo(DataSize.bytes(64));
assertThat(http.getBufferPoolIncrement()).isEqualTo(DataSize.bytes(1024));
assertThat(http.getMaxBufferPoolSize()).isEqualTo(DataSize.kibibytes(64));
assertThat(http.getMinRequestDataPerSecond()).isEqualTo(DataSize.bytes(0));
assertThat(http.getMinResponseDataPerSecond()).isEqualTo(DataSize.bytes(0));
assertThat(http.getAcceptorThreads()).isEmpty();
assertThat(http.getSelectorThreads()).isEmpty();
assertThat(http.getAcceptQueueSize()).isNull();
assertThat(http.isReuseAddress()).isTrue();
assertThat(http.isUseServerHeader()).isFalse();
assertThat(http.isUseDateHeader()).isTrue();
assertThat(http.isUseForwardedHeaders()).isFalse();
assertThat(http.getHttpCompliance()).isEqualTo(HttpCompliance.RFC7230);
assertThat(http.getRequestCookieCompliance()).isEqualTo(CookieCompliance.RFC6265);
assertThat(http.getResponseCookieCompliance()).isEqualTo(CookieCompliance.RFC6265);
}
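// For orientation: the minimal fixture above presumably contains little more
// than "type: http" (hypothetical contents, not verified against the actual
// yaml/http-connector-minimal.yml resource); every asserted value, such as
// port 8080 or the 30s idle timeout, then comes from the connector defaults.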
@Test
void testParseFullConfiguration() throws Exception {
HttpConnectorFactory http =
new YamlConfigurationFactory<>(HttpConnectorFactory.class, validator, objectMapper, "dw")
.build(new ResourceConfigurationSourceProvider(), "yaml/http-connector.yml");
assertThat(http.getPort()).isEqualTo(9090);
assertThat(http.getBindHost()).isEqualTo("127.0.0.1");
assertThat(http.isInheritChannel()).isTrue();
assertThat(http.getHeaderCacheSize()).isEqualTo(DataSize.bytes(256));
assertThat(http.getOutputBufferSize()).isEqualTo(DataSize.kibibytes(128));
assertThat(http.getMaxRequestHeaderSize()).isEqualTo(DataSize.kibibytes(4));
assertThat(http.getMaxResponseHeaderSize()).isEqualTo(DataSize.kibibytes(4));
assertThat(http.getInputBufferSize()).isEqualTo(DataSize.kibibytes(4));
assertThat(http.getIdleTimeout()).isEqualTo(Duration.seconds(10));
assertThat(http.getMinBufferPoolSize()).isEqualTo(DataSize.bytes(128));
assertThat(http.getBufferPoolIncrement()).isEqualTo(DataSize.bytes(500));
assertThat(http.getMaxBufferPoolSize()).isEqualTo(DataSize.kibibytes(32));
assertThat(http.getMinRequestDataPerSecond()).isEqualTo(DataSize.bytes(42));
assertThat(http.getMinResponseDataPerSecond()).isEqualTo(DataSize.bytes(200));
assertThat(http.getAcceptorThreads()).contains(1);
assertThat(http.getSelectorThreads()).contains(4);
assertThat(http.getAcceptQueueSize()).isEqualTo(1024);
assertThat(http.isReuseAddress()).isFalse();
assertThat(http.isUseServerHeader()).isTrue();
assertThat(http.isUseDateHeader()).isFalse();
assertThat(http.isUseForwardedHeaders()).isTrue();
HttpConfiguration httpConfiguration = http.buildHttpConfiguration();
assertThat(httpConfiguration.getCustomizers()).hasAtLeastOneElementOfType(ForwardedRequestCustomizer.class);
assertThat(http.getHttpCompliance()).isEqualTo(HttpCompliance.RFC2616);
assertThat(http.getRequestCookieCompliance()).isEqualTo(CookieCompliance.RFC2965);
assertThat(http.getResponseCookieCompliance()).isEqualTo(CookieCompliance.RFC6265);
}
@Test
void testBuildConnector() throws Exception {
HttpConnectorFactory http = spy(new HttpConnectorFactory());
http.setBindHost("127.0.0.1");
http.setAcceptorThreads(Optional.of(1));
http.setSelectorThreads(Optional.of(2));
http.setAcceptQueueSize(1024);
http.setMinResponseDataPerSecond(DataSize.bytes(200));
http.setMinRequestDataPerSecond(DataSize.bytes(42));
http.setRequestCookieCompliance(CookieCompliance.RFC6265);
http.setResponseCookieCompliance(CookieCompliance.RFC6265);
MetricRegistry metrics = new MetricRegistry();
ThreadPool threadPool = new QueuedThreadPool();
Server server = null;
ServerConnector connector = null;
try {
server = new Server();
connector = (ServerConnector) http.build(server, metrics, "test-http-connector", threadPool);
assertThat(connector.getPort()).isEqualTo(8080);
assertThat(connector.getHost()).isEqualTo("127.0.0.1");
assertThat(connector.getAcceptQueueSize()).isEqualTo(1024);
assertThat(connector.getReuseAddress()).isTrue();
assertThat(connector.getIdleTimeout()).isEqualTo(30000);
assertThat(connector.getName()).isEqualTo("test-http-connector");
assertThat(connector.getServer()).isSameAs(server);
assertThat(connector.getScheduler()).isInstanceOf(ScheduledExecutorScheduler.class);
assertThat(connector.getExecutor()).isSameAs(threadPool);
verify(http).buildBufferPool(64, 1024, 64 * 1024);
assertThat(connector.getAcceptors()).isEqualTo(1);
assertThat(connector.getSelectorManager().getSelectorCount()).isEqualTo(2);
InstrumentedConnectionFactory connectionFactory =
(InstrumentedConnectionFactory) connector.getConnectionFactory("http/1.1");
assertThat(connectionFactory).isInstanceOf(InstrumentedConnectionFactory.class);
assertThat(connectionFactory)
.extracting("connectionFactory")
.asInstanceOf(InstanceOfAssertFactories.type(HttpConnectionFactory.class))
.satisfies(factory -> {
assertThat(factory.getInputBufferSize()).isEqualTo(8192);
assertThat(factory.getHttpCompliance()).isEqualByComparingTo(HttpCompliance.RFC7230);
})
.extracting(HttpConnectionFactory::getHttpConfiguration)
.satisfies(config -> {
assertThat(config.getHeaderCacheSize()).isEqualTo(512);
assertThat(config.getOutputBufferSize()).isEqualTo(32768);
assertThat(config.getRequestHeaderSize()).isEqualTo(8192);
assertThat(config.getResponseHeaderSize()).isEqualTo(8192);
assertThat(config.getSendDateHeader()).isTrue();
assertThat(config.getSendServerVersion()).isFalse();
assertThat(config.getCustomizers()).noneMatch(customizer -> customizer.getClass().equals(ForwardedRequestCustomizer.class));
assertThat(config.getMinRequestDataRate()).isEqualTo(42);
assertThat(config.getMinResponseDataRate()).isEqualTo(200);
assertThat(config.getRequestCookieCompliance()).isEqualTo(CookieCompliance.RFC6265);
assertThat(config.getResponseCookieCompliance()).isEqualTo(CookieCompliance.RFC6265);
});
} finally {
if (connector != null) {
connector.stop();
}
if (server != null) {
server.stop();
}
}
}
@Test
void testBuildConnectorWithProxyProtocol() throws Exception {
HttpConnectorFactory http = new HttpConnectorFactory();
http.setBindHost("127.0.0.1");
http.setUseProxyProtocol(true);
MetricRegistry metrics = new MetricRegistry();
ThreadPool threadPool = new QueuedThreadPool();
Server server = null;
ServerConnector connector = null;
try {
server = new Server();
connector = (ServerConnector) http.build(server, metrics, "test-http-connector-with-proxy-protocol", threadPool);
assertThat(connector.getConnectionFactories().toArray()[0]).isInstanceOf(ProxyConnectionFactory.class);
} finally {
if (connector != null) {
connector.stop();
}
if (server != null) {
server.stop();
}
}
}
@Test
void testDefaultAcceptQueueSize() throws Exception {
HttpConnectorFactory http = new HttpConnectorFactory();
http.setBindHost("127.0.0.1");
http.setAcceptorThreads(Optional.of(1));
http.setSelectorThreads(Optional.of(2));
MetricRegistry metrics = new MetricRegistry();
ThreadPool threadPool = new QueuedThreadPool();
Server server = null;
ServerConnector connector = null;
try {
server = new Server();
connector = (ServerConnector) http.build(server, metrics, "test-http-connector", threadPool);
assertThat(connector.getAcceptQueueSize()).isEqualTo(NetUtil.getTcpBacklog());
} finally {
if (connector != null) {
connector.stop();
}
if (server != null) {
server.stop();
}
}
}
}
| phambryan/dropwizard | dropwizard-jetty/src/test/java/io/dropwizard/jetty/HttpConnectorFactoryTest.java | Java | apache-2.0 | 12,103 |
/* -*- mode: c; c-basic-offset: 4; indent-tabs-mode: nil -*- */
/* lib/krb5/krb/pac.c */
/*
* Copyright 2008 by the Massachusetts Institute of Technology.
* All Rights Reserved.
*
* Export of this software from the United States of America may
* require a specific license from the United States Government.
* It is the responsibility of any person or organization contemplating
* export to obtain such a license before exporting.
*
* WITHIN THAT CONSTRAINT, permission to use, copy, modify, and
* distribute this software and its documentation for any purpose and
* without fee is hereby granted, provided that the above copyright
* notice appear in all copies and that both that copyright notice and
* this permission notice appear in supporting documentation, and that
* the name of M.I.T. not be used in advertising or publicity pertaining
* to distribution of the software without specific, written prior
* permission. Furthermore if you modify this software you must label
* your software as modified software and not distribute it in such a
* fashion that it might be confused with the original M.I.T. software.
* M.I.T. makes no representations about the suitability of
* this software for any purpose. It is provided "as is" without express
* or implied warranty.
*/
#include "k5-int.h"
#include "authdata.h"
/* draft-brezak-win2k-krb-authz-00 */
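/*
 * PAC wire layout, as consumed by krb5_pac_parse() below (all integers are
 * little-endian):
 *
 *   cBuffers (4 octets) | Version (4 octets)
 *   cBuffers x PAC_INFO_BUFFER { ulType (4) | cbBufferSize (4) | Offset (8) }
 *   buffer payloads, each starting at an Offset aligned to PAC_ALIGNMENT (8)
 */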
/*
* Add a buffer to the provided PAC and update header.
*/
krb5_error_code
k5_pac_add_buffer(krb5_context context,
krb5_pac pac,
krb5_ui_4 type,
const krb5_data *data,
krb5_boolean zerofill,
krb5_data *out_data)
{
PACTYPE *header;
size_t header_len, i, pad = 0;
char *pac_data;
assert((data->data == NULL) == zerofill);
/* Check there isn't already a buffer of this type */
if (k5_pac_locate_buffer(context, pac, type, NULL) == 0) {
return EEXIST;
}
header = (PACTYPE *)realloc(pac->pac,
sizeof(PACTYPE) +
(pac->pac->cBuffers * sizeof(PAC_INFO_BUFFER)));
if (header == NULL) {
return ENOMEM;
}
pac->pac = header;
header_len = PACTYPE_LENGTH + (pac->pac->cBuffers * PAC_INFO_BUFFER_LENGTH);
if (data->length % PAC_ALIGNMENT)
pad = PAC_ALIGNMENT - (data->length % PAC_ALIGNMENT);
pac_data = realloc(pac->data.data,
pac->data.length + PAC_INFO_BUFFER_LENGTH + data->length + pad);
if (pac_data == NULL) {
return ENOMEM;
}
pac->data.data = pac_data;
/* Update offsets of existing buffers */
for (i = 0; i < pac->pac->cBuffers; i++)
pac->pac->Buffers[i].Offset += PAC_INFO_BUFFER_LENGTH;
/* Make room for new PAC_INFO_BUFFER */
memmove(pac->data.data + header_len + PAC_INFO_BUFFER_LENGTH,
pac->data.data + header_len,
pac->data.length - header_len);
memset(pac->data.data + header_len, 0, PAC_INFO_BUFFER_LENGTH);
/* Initialise new PAC_INFO_BUFFER; i == cBuffers here, the slot just added */
pac->pac->Buffers[i].ulType = type;
pac->pac->Buffers[i].cbBufferSize = data->length;
pac->pac->Buffers[i].Offset = pac->data.length + PAC_INFO_BUFFER_LENGTH;
assert((pac->pac->Buffers[i].Offset % PAC_ALIGNMENT) == 0);
/* Copy in new PAC data and zero padding bytes */
if (zerofill)
memset(pac->data.data + pac->pac->Buffers[i].Offset, 0, data->length);
else
memcpy(pac->data.data + pac->pac->Buffers[i].Offset, data->data, data->length);
memset(pac->data.data + pac->pac->Buffers[i].Offset + data->length, 0, pad);
pac->pac->cBuffers++;
pac->data.length += PAC_INFO_BUFFER_LENGTH + data->length + pad;
if (out_data != NULL) {
out_data->data = pac->data.data + pac->pac->Buffers[i].Offset;
out_data->length = data->length;
}
pac->verified = FALSE;
return 0;
}
krb5_error_code KRB5_CALLCONV
krb5_pac_add_buffer(krb5_context context,
krb5_pac pac,
krb5_ui_4 type,
const krb5_data *data)
{
return k5_pac_add_buffer(context, pac, type, data, FALSE, NULL);
}
/*
* Free a PAC
*/
void KRB5_CALLCONV
krb5_pac_free(krb5_context context,
krb5_pac pac)
{
if (pac != NULL) {
zapfree(pac->data.data, pac->data.length);
free(pac->pac);
zapfree(pac, sizeof(*pac));
}
}
krb5_error_code
k5_pac_locate_buffer(krb5_context context,
const krb5_pac pac,
krb5_ui_4 type,
krb5_data *data)
{
PAC_INFO_BUFFER *buffer = NULL;
size_t i;
if (pac == NULL)
return EINVAL;
for (i = 0; i < pac->pac->cBuffers; i++) {
if (pac->pac->Buffers[i].ulType == type) {
if (buffer == NULL)
buffer = &pac->pac->Buffers[i];
else
return EINVAL;
}
}
if (buffer == NULL)
return ENOENT;
assert(buffer->Offset + buffer->cbBufferSize <= pac->data.length);
if (data != NULL) {
data->length = buffer->cbBufferSize;
data->data = pac->data.data + buffer->Offset;
}
return 0;
}
/*
* Find a buffer and copy data into output
*/
krb5_error_code KRB5_CALLCONV
krb5_pac_get_buffer(krb5_context context,
krb5_pac pac,
krb5_ui_4 type,
krb5_data *data)
{
krb5_data d;
krb5_error_code ret;
ret = k5_pac_locate_buffer(context, pac, type, &d);
if (ret != 0)
return ret;
data->data = k5memdup(d.data, d.length, &ret);
if (data->data == NULL)
return ret;
data->length = d.length;
return 0;
}
/*
* Return an array of the types of data in the PAC
*/
krb5_error_code KRB5_CALLCONV
krb5_pac_get_types(krb5_context context,
krb5_pac pac,
size_t *len,
krb5_ui_4 **types)
{
size_t i;
*types = (krb5_ui_4 *)malloc(pac->pac->cBuffers * sizeof(krb5_ui_4));
if (*types == NULL)
return ENOMEM;
*len = pac->pac->cBuffers;
for (i = 0; i < pac->pac->cBuffers; i++)
(*types)[i] = pac->pac->Buffers[i].ulType;
return 0;
}
/*
* Initialize PAC
*/
krb5_error_code KRB5_CALLCONV
krb5_pac_init(krb5_context context,
krb5_pac *ppac)
{
krb5_pac pac;
pac = (krb5_pac)malloc(sizeof(*pac));
if (pac == NULL)
return ENOMEM;
pac->pac = (PACTYPE *)malloc(sizeof(PACTYPE));
if (pac->pac == NULL) {
free(pac);
return ENOMEM;
}
pac->pac->cBuffers = 0;
pac->pac->Version = 0;
pac->data.length = PACTYPE_LENGTH;
pac->data.data = calloc(1, pac->data.length);
if (pac->data.data == NULL) {
krb5_pac_free(context, pac);
return ENOMEM;
}
pac->verified = FALSE;
*ppac = pac;
return 0;
}
static krb5_error_code
k5_pac_copy(krb5_context context,
krb5_pac src,
krb5_pac *dst)
{
size_t header_len;
krb5_ui_4 cbuffers;
krb5_error_code code;
krb5_pac pac;
cbuffers = src->pac->cBuffers;
if (cbuffers != 0)
cbuffers--;
header_len = sizeof(PACTYPE) + cbuffers * sizeof(PAC_INFO_BUFFER);
pac = (krb5_pac)malloc(sizeof(*pac));
if (pac == NULL)
return ENOMEM;
pac->pac = k5memdup(src->pac, header_len, &code);
if (pac->pac == NULL) {
free(pac);
return code;
}
code = krb5int_copy_data_contents(context, &src->data, &pac->data);
if (code != 0) {
free(pac->pac);
free(pac);
return ENOMEM;
}
pac->verified = src->verified;
*dst = pac;
return 0;
}
/*
* Parse the supplied data into the PAC allocated by this function
*/
krb5_error_code KRB5_CALLCONV
krb5_pac_parse(krb5_context context,
const void *ptr,
size_t len,
krb5_pac *ppac)
{
krb5_error_code ret;
size_t i;
const unsigned char *p = (const unsigned char *)ptr;
krb5_pac pac;
size_t header_len;
krb5_ui_4 cbuffers, version;
*ppac = NULL;
if (len < PACTYPE_LENGTH)
return ERANGE;
cbuffers = load_32_le(p);
p += 4;
version = load_32_le(p);
p += 4;
if (version != 0)
return EINVAL;
header_len = PACTYPE_LENGTH + (cbuffers * PAC_INFO_BUFFER_LENGTH);
if (len < header_len)
return ERANGE;
ret = krb5_pac_init(context, &pac);
if (ret != 0)
return ret;
pac->pac = (PACTYPE *)realloc(pac->pac,
sizeof(PACTYPE) + ((cbuffers - 1) * sizeof(PAC_INFO_BUFFER)));
if (pac->pac == NULL) {
krb5_pac_free(context, pac);
return ENOMEM;
}
pac->pac->cBuffers = cbuffers;
pac->pac->Version = version;
for (i = 0; i < pac->pac->cBuffers; i++) {
PAC_INFO_BUFFER *buffer = &pac->pac->Buffers[i];
buffer->ulType = load_32_le(p);
p += 4;
buffer->cbBufferSize = load_32_le(p);
p += 4;
buffer->Offset = load_64_le(p);
p += 8;
if (buffer->Offset % PAC_ALIGNMENT) {
krb5_pac_free(context, pac);
return EINVAL;
}
if (buffer->Offset < header_len ||
buffer->Offset + buffer->cbBufferSize > len) {
krb5_pac_free(context, pac);
return ERANGE;
}
}
pac->data.data = realloc(pac->data.data, len);
if (pac->data.data == NULL) {
krb5_pac_free(context, pac);
return ENOMEM;
}
memcpy(pac->data.data, ptr, len);
pac->data.length = len;
*ppac = pac;
return 0;
}
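/*
 * NT timestamps count 100-nanosecond intervals since 1601-01-01; dividing by
 * 10^7 converts to seconds, and NT_TIME_EPOCH is the 1601-to-1970 offset used
 * to shift between the two epochs.
 */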
static krb5_error_code
k5_time_to_seconds_since_1970(int64_t ntTime, krb5_timestamp *elapsedSeconds)
{
uint64_t abstime;
ntTime /= 10000000;
abstime = ntTime > 0 ? ntTime - NT_TIME_EPOCH : -ntTime;
if (abstime > UINT32_MAX)
return ERANGE;
*elapsedSeconds = abstime;
return 0;
}
krb5_error_code
k5_seconds_since_1970_to_time(krb5_timestamp elapsedSeconds, uint64_t *ntTime)
{
*ntTime = elapsedSeconds;
if (elapsedSeconds > 0)
*ntTime += NT_TIME_EPOCH;
*ntTime *= 10000000;
return 0;
}
krb5_error_code
k5_pac_validate_client(krb5_context context,
const krb5_pac pac,
krb5_timestamp authtime,
krb5_const_principal principal)
{
krb5_error_code ret;
krb5_data client_info;
char *pac_princname;
unsigned char *p;
krb5_timestamp pac_authtime;
krb5_ui_2 pac_princname_length;
int64_t pac_nt_authtime;
krb5_principal pac_principal;
ret = k5_pac_locate_buffer(context, pac, KRB5_PAC_CLIENT_INFO,
&client_info);
if (ret != 0)
return ret;
if (client_info.length < PAC_CLIENT_INFO_LENGTH)
return ERANGE;
p = (unsigned char *)client_info.data;
pac_nt_authtime = load_64_le(p);
p += 8;
pac_princname_length = load_16_le(p);
p += 2;
ret = k5_time_to_seconds_since_1970(pac_nt_authtime, &pac_authtime);
if (ret != 0)
return ret;
if (client_info.length < PAC_CLIENT_INFO_LENGTH + pac_princname_length ||
pac_princname_length % 2)
return ERANGE;
ret = k5_utf16le_to_utf8(p, pac_princname_length, &pac_princname);
if (ret != 0)
return ret;
ret = krb5_parse_name_flags(context, pac_princname,
KRB5_PRINCIPAL_PARSE_NO_REALM, &pac_principal);
if (ret != 0) {
free(pac_princname);
return ret;
}
free(pac_princname);
if (pac_authtime != authtime ||
!krb5_principal_compare_flags(context,
pac_principal,
principal,
KRB5_PRINCIPAL_COMPARE_IGNORE_REALM))
ret = KRB5KRB_AP_WRONG_PRINC;
krb5_free_principal(context, pac_principal);
return ret;
}
static krb5_error_code
k5_pac_zero_signature(krb5_context context,
const krb5_pac pac,
krb5_ui_4 type,
krb5_data *data)
{
PAC_INFO_BUFFER *buffer = NULL;
size_t i;
assert(type == KRB5_PAC_SERVER_CHECKSUM ||
type == KRB5_PAC_PRIVSVR_CHECKSUM);
assert(data->length >= pac->data.length);
for (i = 0; i < pac->pac->cBuffers; i++) {
if (pac->pac->Buffers[i].ulType == type) {
buffer = &pac->pac->Buffers[i];
break;
}
}
if (buffer == NULL)
return ENOENT;
if (buffer->Offset + buffer->cbBufferSize > pac->data.length)
return ERANGE;
if (buffer->cbBufferSize < PAC_SIGNATURE_DATA_LENGTH)
return KRB5_BAD_MSIZE;
/* Zero out the data portion of the checksum only */
memset(data->data + buffer->Offset + PAC_SIGNATURE_DATA_LENGTH,
0,
buffer->cbBufferSize - PAC_SIGNATURE_DATA_LENGTH);
return 0;
}
static krb5_error_code
k5_pac_verify_server_checksum(krb5_context context,
const krb5_pac pac,
const krb5_keyblock *server)
{
krb5_error_code ret;
krb5_data pac_data; /* PAC with zeroed checksums */
krb5_checksum checksum;
krb5_data checksum_data;
krb5_boolean valid;
krb5_octet *p;
ret = k5_pac_locate_buffer(context, pac, KRB5_PAC_SERVER_CHECKSUM,
&checksum_data);
if (ret != 0)
return ret;
if (checksum_data.length < PAC_SIGNATURE_DATA_LENGTH)
return KRB5_BAD_MSIZE;
p = (krb5_octet *)checksum_data.data;
checksum.checksum_type = load_32_le(p);
checksum.length = checksum_data.length - PAC_SIGNATURE_DATA_LENGTH;
checksum.contents = p + PAC_SIGNATURE_DATA_LENGTH;
if (!krb5_c_is_keyed_cksum(checksum.checksum_type))
return KRB5KRB_AP_ERR_INAPP_CKSUM;
pac_data.length = pac->data.length;
pac_data.data = k5memdup(pac->data.data, pac->data.length, &ret);
if (pac_data.data == NULL)
return ret;
/* Zero out both checksum buffers */
ret = k5_pac_zero_signature(context, pac, KRB5_PAC_SERVER_CHECKSUM,
&pac_data);
if (ret != 0) {
free(pac_data.data);
return ret;
}
ret = k5_pac_zero_signature(context, pac, KRB5_PAC_PRIVSVR_CHECKSUM,
&pac_data);
if (ret != 0) {
free(pac_data.data);
return ret;
}
ret = krb5_c_verify_checksum(context, server,
KRB5_KEYUSAGE_APP_DATA_CKSUM,
&pac_data, &checksum, &valid);
free(pac_data.data);
if (ret != 0) {
return ret;
}
if (valid == FALSE)
ret = KRB5KRB_AP_ERR_BAD_INTEGRITY;
return ret;
}
static krb5_error_code
k5_pac_verify_kdc_checksum(krb5_context context,
const krb5_pac pac,
const krb5_keyblock *privsvr)
{
krb5_error_code ret;
krb5_data server_checksum, privsvr_checksum;
krb5_checksum checksum;
krb5_boolean valid;
krb5_octet *p;
ret = k5_pac_locate_buffer(context, pac, KRB5_PAC_PRIVSVR_CHECKSUM,
&privsvr_checksum);
if (ret != 0)
return ret;
if (privsvr_checksum.length < PAC_SIGNATURE_DATA_LENGTH)
return KRB5_BAD_MSIZE;
ret = k5_pac_locate_buffer(context, pac, KRB5_PAC_SERVER_CHECKSUM,
&server_checksum);
if (ret != 0)
return ret;
if (server_checksum.length < PAC_SIGNATURE_DATA_LENGTH)
return KRB5_BAD_MSIZE;
p = (krb5_octet *)privsvr_checksum.data;
checksum.checksum_type = load_32_le(p);
checksum.length = privsvr_checksum.length - PAC_SIGNATURE_DATA_LENGTH;
checksum.contents = p + PAC_SIGNATURE_DATA_LENGTH;
if (!krb5_c_is_keyed_cksum(checksum.checksum_type))
return KRB5KRB_AP_ERR_INAPP_CKSUM;
server_checksum.data += PAC_SIGNATURE_DATA_LENGTH;
server_checksum.length -= PAC_SIGNATURE_DATA_LENGTH;
ret = krb5_c_verify_checksum(context, privsvr,
KRB5_KEYUSAGE_APP_DATA_CKSUM,
&server_checksum, &checksum, &valid);
if (ret != 0)
return ret;
if (valid == FALSE)
ret = KRB5KRB_AP_ERR_BAD_INTEGRITY;
return ret;
}
krb5_error_code KRB5_CALLCONV
krb5_pac_verify(krb5_context context,
const krb5_pac pac,
krb5_timestamp authtime,
krb5_const_principal principal,
const krb5_keyblock *server,
const krb5_keyblock *privsvr)
{
krb5_error_code ret;
if (server != NULL) {
ret = k5_pac_verify_server_checksum(context, pac, server);
if (ret != 0)
return ret;
}
if (privsvr != NULL) {
ret = k5_pac_verify_kdc_checksum(context, pac, privsvr);
if (ret != 0)
return ret;
}
if (principal != NULL) {
ret = k5_pac_validate_client(context, pac, authtime, principal);
if (ret != 0)
return ret;
}
pac->verified = TRUE;
return 0;
}
/*
* PAC auth data attribute backend
*/
struct mspac_context {
krb5_pac pac;
};
static krb5_error_code
mspac_init(krb5_context kcontext, void **plugin_context)
{
*plugin_context = NULL;
return 0;
}
static void
mspac_flags(krb5_context kcontext,
void *plugin_context,
krb5_authdatatype ad_type,
krb5_flags *flags)
{
*flags = AD_USAGE_TGS_REQ;
}
static void
mspac_fini(krb5_context kcontext, void *plugin_context)
{
return;
}
static krb5_error_code
mspac_request_init(krb5_context kcontext,
krb5_authdata_context context,
void *plugin_context,
void **request_context)
{
struct mspac_context *pacctx;
pacctx = (struct mspac_context *)malloc(sizeof(*pacctx));
if (pacctx == NULL)
return ENOMEM;
pacctx->pac = NULL;
*request_context = pacctx;
return 0;
}
static krb5_error_code
mspac_import_authdata(krb5_context kcontext,
krb5_authdata_context context,
void *plugin_context,
void *request_context,
krb5_authdata **authdata,
krb5_boolean kdc_issued,
krb5_const_principal kdc_issuer)
{
krb5_error_code code;
struct mspac_context *pacctx = (struct mspac_context *)request_context;
if (kdc_issued)
return EINVAL;
if (pacctx->pac != NULL) {
krb5_pac_free(kcontext, pacctx->pac);
pacctx->pac = NULL;
}
assert(authdata[0] != NULL);
assert((authdata[0]->ad_type & AD_TYPE_FIELD_TYPE_MASK) ==
KRB5_AUTHDATA_WIN2K_PAC);
code = krb5_pac_parse(kcontext, authdata[0]->contents,
authdata[0]->length, &pacctx->pac);
return code;
}
static krb5_error_code
mspac_export_authdata(krb5_context kcontext,
krb5_authdata_context context,
void *plugin_context,
void *request_context,
krb5_flags usage,
krb5_authdata ***out_authdata)
{
struct mspac_context *pacctx = (struct mspac_context *)request_context;
krb5_error_code code;
krb5_authdata **authdata;
krb5_data data;
if (pacctx->pac == NULL)
return 0;
authdata = calloc(2, sizeof(krb5_authdata *));
if (authdata == NULL)
return ENOMEM;
authdata[0] = calloc(1, sizeof(krb5_authdata));
if (authdata[0] == NULL) {
free(authdata);
return ENOMEM;
}
authdata[1] = NULL;
code = krb5int_copy_data_contents(kcontext, &pacctx->pac->data, &data);
if (code != 0) {
krb5_free_authdata(kcontext, authdata);
return code;
}
authdata[0]->magic = KV5M_AUTHDATA;
authdata[0]->ad_type = KRB5_AUTHDATA_WIN2K_PAC;
authdata[0]->length = data.length;
authdata[0]->contents = (krb5_octet *)data.data;
authdata[1] = NULL;
*out_authdata = authdata;
return 0;
}
static krb5_error_code
mspac_verify(krb5_context kcontext,
krb5_authdata_context context,
void *plugin_context,
void *request_context,
const krb5_auth_context *auth_context,
const krb5_keyblock *key,
const krb5_ap_req *req)
{
krb5_error_code code;
struct mspac_context *pacctx = (struct mspac_context *)request_context;
if (pacctx->pac == NULL)
return EINVAL;
code = krb5_pac_verify(kcontext, pacctx->pac,
req->ticket->enc_part2->times.authtime,
req->ticket->enc_part2->client, key, NULL);
if (code != 0)
TRACE_MSPAC_VERIFY_FAIL(kcontext, code);
/*
* If the above verification failed, don't fail the whole authentication,
* just don't mark the PAC as verified. A checksum mismatch can occur if
* the PAC was copied from a cross-realm TGT by an ignorant KDC, and Apple
* macOS Server Open Directory (as of 10.6) generates PACs with no server
* checksum at all.
*/
return 0;
}
static void
mspac_request_fini(krb5_context kcontext,
krb5_authdata_context context,
void *plugin_context,
void *request_context)
{
struct mspac_context *pacctx = (struct mspac_context *)request_context;
if (pacctx != NULL) {
if (pacctx->pac != NULL)
krb5_pac_free(kcontext, pacctx->pac);
free(pacctx);
}
}
#define STRLENOF(x) (sizeof((x)) - 1)
static struct {
krb5_ui_4 type;
krb5_data attribute;
} mspac_attribute_types[] = {
{ (krb5_ui_4)-1, { KV5M_DATA, STRLENOF("urn:mspac:"),
"urn:mspac:" } },
{ KRB5_PAC_LOGON_INFO, { KV5M_DATA,
STRLENOF("urn:mspac:logon-info"),
"urn:mspac:logon-info" } },
{ KRB5_PAC_CREDENTIALS_INFO, { KV5M_DATA,
STRLENOF("urn:mspac:credentials-info"),
"urn:mspac:credentials-info" } },
{ KRB5_PAC_SERVER_CHECKSUM, { KV5M_DATA,
STRLENOF("urn:mspac:server-checksum"),
"urn:mspac:server-checksum" } },
{ KRB5_PAC_PRIVSVR_CHECKSUM, { KV5M_DATA,
STRLENOF("urn:mspac:privsvr-checksum"),
"urn:mspac:privsvr-checksum" } },
{ KRB5_PAC_CLIENT_INFO, { KV5M_DATA,
STRLENOF("urn:mspac:client-info"),
"urn:mspac:client-info" } },
{ KRB5_PAC_DELEGATION_INFO, { KV5M_DATA,
STRLENOF("urn:mspac:delegation-info"),
"urn:mspac:delegation-info" } },
{ KRB5_PAC_UPN_DNS_INFO, { KV5M_DATA,
STRLENOF("urn:mspac:upn-dns-info"),
"urn:mspac:upn-dns-info" } },
};
#define MSPAC_ATTRIBUTE_COUNT (sizeof(mspac_attribute_types)/sizeof(mspac_attribute_types[0]))
static krb5_error_code
mspac_type2attr(krb5_ui_4 type, krb5_data *attr)
{
unsigned int i;
for (i = 0; i < MSPAC_ATTRIBUTE_COUNT; i++) {
if (mspac_attribute_types[i].type == type) {
*attr = mspac_attribute_types[i].attribute;
return 0;
}
}
return ENOENT;
}
static krb5_error_code
mspac_attr2type(const krb5_data *attr, krb5_ui_4 *type)
{
unsigned int i;
for (i = 0; i < MSPAC_ATTRIBUTE_COUNT; i++) {
if (attr->length == mspac_attribute_types[i].attribute.length &&
strncasecmp(attr->data, mspac_attribute_types[i].attribute.data, attr->length) == 0) {
*type = mspac_attribute_types[i].type;
return 0;
}
}
if (attr->length > STRLENOF("urn:mspac:") &&
strncasecmp(attr->data, "urn:mspac:", STRLENOF("urn:mspac:")) == 0)
{
char *p = &attr->data[STRLENOF("urn:mspac:")];
char *endptr;
*type = strtoul(p, &endptr, 10);
if (*type != 0 && *endptr == '\0')
return 0;
}
return ENOENT;
}
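/*
 * Example: "urn:mspac:logon-info" maps to KRB5_PAC_LOGON_INFO via the table
 * above, while an unlisted buffer type can still be addressed numerically,
 * e.g. "urn:mspac:12" yields type 12 through the strtoul() fallback.
 */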
static krb5_error_code
mspac_get_attribute_types(krb5_context kcontext,
krb5_authdata_context context,
void *plugin_context,
void *request_context,
krb5_data **out_attrs)
{
struct mspac_context *pacctx = (struct mspac_context *)request_context;
unsigned int i, j;
krb5_data *attrs;
krb5_error_code code;
if (pacctx->pac == NULL)
return ENOENT;
attrs = calloc(1 + pacctx->pac->pac->cBuffers + 1, sizeof(krb5_data));
if (attrs == NULL)
return ENOMEM;
j = 0;
/* The entire PAC */
code = krb5int_copy_data_contents(kcontext,
&mspac_attribute_types[0].attribute,
&attrs[j++]);
if (code != 0) {
free(attrs);
return code;
}
/* PAC buffers */
for (i = 0; i < pacctx->pac->pac->cBuffers; i++) {
krb5_data attr;
code = mspac_type2attr(pacctx->pac->pac->Buffers[i].ulType, &attr);
if (code == 0) {
code = krb5int_copy_data_contents(kcontext, &attr, &attrs[j++]);
if (code != 0) {
krb5int_free_data_list(kcontext, attrs);
return code;
}
} else {
int length;
length = asprintf(&attrs[j].data, "urn:mspac:%d",
pacctx->pac->pac->Buffers[i].ulType);
if (length < 0) {
krb5int_free_data_list(kcontext, attrs);
return ENOMEM;
}
attrs[j++].length = length;
}
}
attrs[j].data = NULL;
attrs[j].length = 0;
*out_attrs = attrs;
return 0;
}
static krb5_error_code
mspac_get_attribute(krb5_context kcontext,
krb5_authdata_context context,
void *plugin_context,
void *request_context,
const krb5_data *attribute,
krb5_boolean *authenticated,
krb5_boolean *complete,
krb5_data *value,
krb5_data *display_value,
int *more)
{
struct mspac_context *pacctx = (struct mspac_context *)request_context;
krb5_error_code code;
krb5_ui_4 type;
if (display_value != NULL) {
display_value->data = NULL;
display_value->length = 0;
}
if (*more != -1 || pacctx->pac == NULL)
return ENOENT;
/* If it didn't verify, pretend it didn't exist. */
if (!pacctx->pac->verified) {
TRACE_MSPAC_DISCARD_UNVERF(kcontext);
return ENOENT;
}
code = mspac_attr2type(attribute, &type);
if (code != 0)
return code;
/* -1 is a magic type that refers to the entire PAC */
if (type == (krb5_ui_4)-1) {
if (value != NULL)
code = krb5int_copy_data_contents(kcontext,
&pacctx->pac->data,
value);
else
code = 0;
} else {
if (value != NULL)
code = krb5_pac_get_buffer(kcontext, pacctx->pac, type, value);
else
code = k5_pac_locate_buffer(kcontext, pacctx->pac, type, NULL);
}
if (code == 0) {
*authenticated = pacctx->pac->verified;
*complete = TRUE;
}
*more = 0;
return code;
}
static krb5_error_code
mspac_set_attribute(krb5_context kcontext,
krb5_authdata_context context,
void *plugin_context,
void *request_context,
krb5_boolean complete,
const krb5_data *attribute,
const krb5_data *value)
{
struct mspac_context *pacctx = (struct mspac_context *)request_context;
krb5_error_code code;
krb5_ui_4 type;
if (pacctx->pac == NULL)
return ENOENT;
code = mspac_attr2type(attribute, &type);
if (code != 0)
return code;
/* -1 is a magic type that refers to the entire PAC */
if (type == (krb5_ui_4)-1) {
krb5_pac newpac;
code = krb5_pac_parse(kcontext, value->data, value->length, &newpac);
if (code != 0)
return code;
krb5_pac_free(kcontext, pacctx->pac);
pacctx->pac = newpac;
} else {
code = krb5_pac_add_buffer(kcontext, pacctx->pac, type, value);
}
return code;
}
static krb5_error_code
mspac_export_internal(krb5_context kcontext,
krb5_authdata_context context,
void *plugin_context,
void *request_context,
krb5_boolean restrict_authenticated,
void **ptr)
{
struct mspac_context *pacctx = (struct mspac_context *)request_context;
krb5_error_code code;
krb5_pac pac;
*ptr = NULL;
if (pacctx->pac == NULL)
return ENOENT;
if (restrict_authenticated && (pacctx->pac->verified) == FALSE)
return ENOENT;
code = krb5_pac_parse(kcontext, pacctx->pac->data.data,
pacctx->pac->data.length, &pac);
if (code == 0) {
pac->verified = pacctx->pac->verified;
*ptr = pac;
}
return code;
}
static void
mspac_free_internal(krb5_context kcontext,
krb5_authdata_context context,
void *plugin_context,
void *request_context,
void *ptr)
{
if (ptr != NULL)
krb5_pac_free(kcontext, (krb5_pac)ptr);
return;
}
static krb5_error_code
mspac_size(krb5_context kcontext,
krb5_authdata_context context,
void *plugin_context,
void *request_context,
size_t *sizep)
{
struct mspac_context *pacctx = (struct mspac_context *)request_context;
*sizep += sizeof(krb5_int32);
if (pacctx->pac != NULL)
*sizep += pacctx->pac->data.length;
*sizep += sizeof(krb5_int32);
return 0;
}
static krb5_error_code
mspac_externalize(krb5_context kcontext,
krb5_authdata_context context,
void *plugin_context,
void *request_context,
krb5_octet **buffer,
size_t *lenremain)
{
krb5_error_code code = 0;
struct mspac_context *pacctx = (struct mspac_context *)request_context;
size_t required = 0;
krb5_octet *bp;
size_t remain;
bp = *buffer;
remain = *lenremain;
if (pacctx->pac != NULL) {
mspac_size(kcontext, context, plugin_context,
request_context, &required);
if (required <= remain) {
krb5_ser_pack_int32((krb5_int32)pacctx->pac->data.length,
&bp, &remain);
krb5_ser_pack_bytes((krb5_octet *)pacctx->pac->data.data,
(size_t)pacctx->pac->data.length,
&bp, &remain);
krb5_ser_pack_int32((krb5_int32)pacctx->pac->verified,
&bp, &remain);
} else {
code = ENOMEM;
}
} else {
krb5_ser_pack_int32(0, &bp, &remain); /* length */
krb5_ser_pack_int32(0, &bp, &remain); /* verified */
}
*buffer = bp;
*lenremain = remain;
return code;
}
static krb5_error_code
mspac_internalize(krb5_context kcontext,
krb5_authdata_context context,
void *plugin_context,
void *request_context,
krb5_octet **buffer,
size_t *lenremain)
{
struct mspac_context *pacctx = (struct mspac_context *)request_context;
krb5_error_code code;
krb5_int32 ibuf;
krb5_octet *bp;
size_t remain;
krb5_pac pac = NULL;
bp = *buffer;
remain = *lenremain;
/* length */
code = krb5_ser_unpack_int32(&ibuf, &bp, &remain);
if (code != 0)
return code;
if (ibuf != 0) {
code = krb5_pac_parse(kcontext, bp, ibuf, &pac);
if (code != 0)
return code;
bp += ibuf;
remain -= ibuf;
}
/* verified */
code = krb5_ser_unpack_int32(&ibuf, &bp, &remain);
if (code != 0) {
krb5_pac_free(kcontext, pac);
return code;
}
if (pac != NULL) {
pac->verified = (ibuf != 0);
}
if (pacctx->pac != NULL) {
krb5_pac_free(kcontext, pacctx->pac);
}
pacctx->pac = pac;
*buffer = bp;
*lenremain = remain;
return 0;
}
static krb5_error_code
mspac_copy(krb5_context kcontext,
krb5_authdata_context context,
void *plugin_context,
void *request_context,
void *dst_plugin_context,
void *dst_request_context)
{
struct mspac_context *srcctx = (struct mspac_context *)request_context;
struct mspac_context *dstctx = (struct mspac_context *)dst_request_context;
krb5_error_code code = 0;
assert(dstctx != NULL);
assert(dstctx->pac == NULL);
if (srcctx->pac != NULL)
code = k5_pac_copy(kcontext, srcctx->pac, &dstctx->pac);
return code;
}
static krb5_authdatatype mspac_ad_types[] = { KRB5_AUTHDATA_WIN2K_PAC, 0 };
krb5plugin_authdata_client_ftable_v0 k5_mspac_ad_client_ftable = {
"mspac",
mspac_ad_types,
mspac_init,
mspac_fini,
mspac_flags,
mspac_request_init,
mspac_request_fini,
mspac_get_attribute_types,
mspac_get_attribute,
mspac_set_attribute,
NULL, /* delete_attribute_proc */
mspac_export_authdata,
mspac_import_authdata,
mspac_export_internal,
mspac_free_internal,
mspac_verify,
mspac_size,
mspac_externalize,
mspac_internalize,
mspac_copy
};
| gerritjvv/cryptoplayground | kerberos/kdc/src/krb5-1.16/src/lib/krb5/krb/pac.c | C | apache-2.0 | 34,236 |
package com.p.service;
import java.util.Collection;
import java.util.Optional;
import java.util.Random;
import java.util.UUID;
import javax.annotation.Resource;
import org.apache.log4j.Logger;
import org.hibernate.SessionFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.security.core.Authentication;
import org.springframework.security.core.context.SecurityContext;
import org.springframework.security.core.context.SecurityContextHolder;
import org.springframework.security.crypto.password.PasswordEncoder;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Isolation;
import org.springframework.transaction.annotation.Transactional;
import org.springframework.util.Assert;
import com.p.model.Notificacion;
import com.p.model.Role;
import com.p.model.User;
import com.p.model.modelAux.RegisterUser;
import com.p.model.repositories.UserRepository;
@Service("usersService")
@Transactional(isolation = Isolation.READ_UNCOMMITTED)
public class UsersService {
protected static Logger logger = Logger.getLogger("service");
@Resource(name = "sessionFactory")
private SessionFactory sessionFactory;
@Autowired
private UserRepository repository;
@Autowired
private NotificacionService notificacionService;
@Autowired
private EmailManager emailManager;
@Autowired
private PasswordEncoder passwordEncoder;
@Transactional
/**
* Deletes a user, whether a web user (its id starts with 1) or a keyring user (its id starts with 0)
*
* @param id
*            the id of the existing user
*/
public void delete(Integer id) {
Assert.notNull(id);
Assert.isTrue(id > 0);
repository.delete(id);
}
/**
* Saves or edits, depending on whether the ID is filled in
*
* @param us
*/
@Transactional()
public User save(User us) {
gestionarAvatar(us);
gestionarAltaUsuario(us);
User usr = repository.save(us);
return usr;
}
protected void gestionarAltaUsuario(User us) {
if (us.getId() == null || us.getId().equals(0)) {
gestionarNotificacionAltaUsuario(us);
gestionarEmailAltaUsuario(us);
}
}
protected void gestionarEmailAltaUsuario(User us) {
emailManager.notify(us);
}
/**
* @param us
*/
protected void gestionarNotificacionAltaUsuario(User us) {
// It is a new user:
// send them an email and a notification
Notificacion notificacion = notificacionService.create();
Optional<User> admin = repository.findAdministradores().stream()
.findFirst();
Assert.isTrue(admin.isPresent());
User administrador = admin.get();
notificacion.setEmisor(administrador);
notificacion.setReceptor(us);
notificacion.setTitulo("Gracias por registrarte en Pachanga!");
notificacion
.setContenido("¿Porque no completas tu perfil? Quedará mucho más mono :)");
notificacionService.save(notificacion);
}
/**
* @param us
*/
protected void gestionarAvatar(User us) {
if (us.getAvatar() == null) {
Random rd = new Random();
us.setAvatar(User.avatarCss[rd.nextInt(User.avatarCss.length)]);
}
}
@Transactional
public User getByEmail(String login) {
Assert.notNull(login);
Assert.isTrue(login.length() > 0);
return repository.findByEmail(login);
}
@Transactional
public User findOne(Integer id) {
Assert.notNull(id);
Assert.isTrue(id > -1);
return repository.findOne(id);
}
@Transactional
public Collection<User> findAll() {
return repository.findAll();
}
@Transactional
public Collection<User> findAllDifferent(String email) {
return repository.findAllDifferent(email);
}
@Transactional(readOnly = true)
/**
*
* @author David Romero Alcaide
* @return
*/
public User getPrincipal() {
User result;
SecurityContext context;
Authentication authentication;
Object principal;
// If the asserts in this method fail, then you're
// likely to have your Tomcat's working directory
// corrupt. Please, clear your browser's cache, stop
// Tomcat, update your Maven's project configuration,
// clean your project, clean Tomcat's working directory,
// republish your project, and start it over.
context = SecurityContextHolder.getContext();
Assert.notNull(context);
authentication = context.getAuthentication();
Assert.notNull(authentication);
principal = authentication.getPrincipal();
Assert.isTrue(principal instanceof org.springframework.security.core.userdetails.User);
result = getByEmail(((org.springframework.security.core.userdetails.User) principal)
.getUsername());
Assert.notNull(result);
Assert.isTrue(result.getId() != 0);
return result;
}
public User map(RegisterUser user) {
User usr = create();
usr.setEmail(user.getEmail());
usr.setPassword(user.getPassword());
return usr;
}
public User create() {
User user = new User();
user.setFirstName(" ");
user.setLastName(" ");
user.setRole(Role.ROLE_USER);
return user;
}
@Transactional
public void regenerarPassword(User user) {
String newPass = UUID.randomUUID().toString();
// Persist only the encoded hash, but e-mail the plain-text value so the
// user receives a password that actually works at login.
user.setPassword(passwordEncoder.encode(newPass));
save(user);
emailManager.notifyNewPassword(user, newPass);
}
@Transactional(isolation = Isolation.READ_UNCOMMITTED)
public byte[] findImage(Integer id) {
Assert.notNull(id);
Assert.isTrue(id > 0);
return repository.findImage(id);
}
@Transactional(readOnly = true)
public Collection<? extends User> find(String texto) {
return repository.findFullText(texto);
}
}
| david-romero/Pachanga | src/main/java/com/p/service/UsersService.java | Java | apache-2.0 | 5,489 |
from must import MustHavePatterns
from successor import Successor
class TestSuccessor(object):
@classmethod
def setup_class(cls):
cls.test_patterns = MustHavePatterns(Successor)
def test_successor(self):
try:
self.test_patterns.create(Successor)
raise Exception("Recursive structure did not explode.")
except RuntimeError as re:
assert str(re).startswith("maximum recursion depth")
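# Note: Python 3.5+ raises RecursionError here; it subclasses RuntimeError,
# so the except clause above still matches.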
| umaptechnologies/must | examples/miscExamples/test_successor.py | Python | apache-2.0 | 457 |
(function() {
'use strict';
angular
.module('fitappApp')
.controller('RequestResetController', RequestResetController);
RequestResetController.$inject = ['$timeout', 'Auth'];
function RequestResetController ($timeout, Auth) {
var vm = this;
vm.error = null;
vm.errorEmailNotExists = null;
vm.requestReset = requestReset;
vm.resetAccount = {};
vm.success = null;
$timeout(function (){angular.element('#email').focus();});
function requestReset () {
vm.error = null;
vm.errorEmailNotExists = null;
Auth.resetPasswordInit(vm.resetAccount.email).then(function () {
vm.success = 'OK';
}).catch(function (response) {
vm.success = null;
if (response.status === 400 && response.data === 'e-mail address not registered') {
vm.errorEmailNotExists = 'ERROR';
} else {
vm.error = 'ERROR';
}
});
}
}
})();
| tomkasp/fitapp | src/main/webapp/app/account/reset/request/reset.request.controller.js | JavaScript | apache-2.0 | 1,089 |
#-*- encoding: utf-8 -*-
import csv, math, time, re, threading, sys
try:
from urllib.request import urlopen
except ImportError:
from urllib import urlopen
class ErAPI():
# Constructor: basic required configuration setup; instantiates useful helper objects
def __init__(self):
self.data = {}
# Data format: {'XXCiro|BNC': {'id': 123456, 'nick': 'XXCiro', 'level': 49, 'strength': 532.5, 'rank_points': 1233354, 'citizenship': 'Argentina'}}
# Points required per rank
self.rank_required_points = {
"Recruit": 0,
"Private": 15,
"Private*": 45,
"Private**": 80,
"Private***": 120,
"Corporal": 170,
"Corporal*": 250,
"Corporal**": 350,
"Corporal***": 450,
"Sergeant": 600,
"Sergeant*": 800,
"Sergeant**": 1000,
"Sergeant***": 1400,
"Lieutenant": 1850,
"Lieutenant*": 2350,
"Lieutenant**": 3000,
"Lieutenant***": 3750,
"Captain": 5000,
"Captain*": 6500,
"Captain**": 9000,
"Captain***": 12000,
"Major": 15500,
"Major*": 20000,
"Major**": 25000,
"Major***": 31000,
"Commander": 40000,
"Commander*": 52000,
"Commander**": 67000,
"Commander***": 85000,
"Lt Colonel": 110000,
"Lt Colonel*": 140000,
"Lt Colonel**": 180000,
"Lt Colonel***": 225000,
"Colonel": 285000,
"Colonel*": 355000,
"Colonel**": 435000,
"Colonel***": 540000,
"General": 660000,
"General*": 800000,
"General**": 950000,
"General***": 1140000,
"Field Marshal": 1350000,
"Field Marshal*": 1600000,
"Field Marshal**": 1875000,
"Field Marshal***": 2185000,
"Supreme Marshal": 2550000,
"Supreme Marshal*": 3000000,
"Supreme Marshal**": 3500000,
"Supreme Marshal***": 4150000,
"National Force": 4900000,
"National Force*": 5800000,
"National Force**": 7000000,
"National Force***": 9000000,
"World Class Force": 11500000,
"World Class Force*": 14500000,
"World Class Force**": 18000000,
"World Class Force***": 22000000,
"Legendary Force": 26500000,
"Legendary Force*": 31500000,
"Legendary Force**": 37000000,
"Legendary Force***": 42000000,
"God of War": 50000000,
"God of War*": 100000000 ,
"God of War**": 200000000,
"God of War***": 500000000,
"Titan": 1000000000,
"Titan*": 2000000000,
"Titan**": 4000000000,
"Titan***": 10000000000}
# Ordered list of ranks by importance
self.rank_to_pos = [
"Recruit",
"Private",
"Private*",
"Private**",
"Private***",
"Corporal",
"Corporal*",
"Corporal**",
"Corporal***",
"Sergeant",
"Sergeant*",
"Sergeant**",
"Sergeant***",
"Lieutenant",
"Lieutenant*",
"Lieutenant**",
"Lieutenant***",
"Captain",
"Captain*",
"Captain**",
"Captain***",
"Major",
"Major*",
"Major**",
"Major***",
"Commander",
"Commander*",
"Commander**",
"Commander***",
"Lt Colonel",
"Lt Colonel*",
"Lt Colonel**",
"Lt Colonel***",
"Colonel",
"Colonel*",
"Colonel**",
"Colonel***",
"General",
"General*",
"General**",
"General***",
"Field Marshal",
"Field Marshal*",
"Field Marshal**",
"Field Marshal***",
"Supreme Marshal",
"Supreme Marshal*",
"Supreme Marshal**",
"Supreme Marshal***",
"National Force",
"National Force*",
"National Force**",
"National Force***",
"World Class Force",
"World Class Force*",
"World Class Force**",
"World Class Force***",
"Legendary Force",
"Legendary Force*",
"Legendary Force**",
"Legendary Force***",
"God of War",
"God of War*",
"God of War**",
"God of War***",
"Titan",
"Titan*",
"Titan**",
"Titan***",]
# Run flag, useful if the data update/save threads must be killed manually
self.run = True
# Data loading runs in a new thread, daemonized so it dies with its parent on premature shutdown
th = threading.Thread(target=self.data_loader)
th.daemon = True
th.start()
# Invoker method: loads data and spawns the save/update threads; only called from the constructor
def data_loader(self):
self.load_data()
self.data_saver_th = threading.Thread(target=self.data_saver)
self.data_saver_th.daemon = True
self.data_saver_th.start()
self.data_updater_th = threading.Thread(target=self.data_updater)
self.data_updater_th.daemon = True
self.data_updater_th.start()
# Dumps the data to a physical file; only called from data_loader
def data_saver(self):
while self.run:
self.save_data()
time.sleep(60)
# Refreshes the data; only called from data_loader
def data_updater(self):
while self.run:
for irc_nick in self.data:
self.update_data(irc_nick)
time.sleep(30)
time.sleep(600)
# ---------------------------------------------------------------------------------- #
# @ PUBLIC METHODS #
# ---------------------------------------------------------------------------------- #
# Reloads the object's local data from file
def load_data(self):
try:
f = open('data/er_nick-data.csv', 'rt')
reader = csv.reader(f)
for nick_irc,id,nick_er,level,strength,rank_points,citizenship in reader:
self.data[nick_irc] = {'id': int(id), 'nick': nick_er, 'level': int(level), 'strength': float(strength), 'rank_points': int(rank_points), 'citizenship': citizenship}
f.close()
except:
pass
# Saves the object's local data to file
def save_data(self):
try:
f = open('data/er_nick-data.csv', 'wt')
writer = csv.writer(f)
for u in self.data:
writer.writerow([u, self.data[u]['id'], self.data[u]['nick'], self.data[u]['level'], self.data[u]['strength'], self.data[u]['rank_points'], self.data[u]['citizenship']])
f.close()
except:
pass
# Scraper: refreshes the local data for the given IRC nick
def update_data(self, irc_nick):
try:
id = self.data[irc_nick]['id']
c = urlopen('http://www.erepublik.com/es/citizen/profile/%d' % id)
page = c.read()
c.close()
self.data[irc_nick]['nick'] = re.search('<meta name="title" content="(.+?) - Ciudadano del Nuevo Mundo" \/>', page.decode('utf-8')).group(1)
self.data[irc_nick]['level'] = int(re.search('<strong class="citizen_level">(.+?)<\/strong>', page.decode('utf-8'), re.DOTALL).group(1))
self.data[irc_nick]['strength'] = float(re.search('<span class="military_box_info mb_bottom">(.+?)</span>', page.decode('utf-8'), re.DOTALL).group(1).strip('\r\n\t ').replace(',',''))
self.data[irc_nick]['rank_points'] = int(re.search('<span class="rank_numbers">(.+?) \/', page.decode('utf-8'), re.DOTALL).group(1).replace(',',''))
self.data[irc_nick]['citizenship'] = re.search('<a href="http\:\/\/www.erepublik.com\/es\/country\/society\/([^ \t\n\x0B\f\r]+?)">', page.decode('utf-8')).group(1)
except:
pass
# Registers the given IRC nick with the given id and forces a data refresh
def reg_nick_write(self, nick, id):
if(nick.lower() in self.data.keys()):
self.data[nick.lower()]['id'] = int(id)
else:
self.data[nick.lower()] = {'id': int(id), 'nick': nick, 'level': 1, 'strength': 0, 'rank_points': 0, 'citizenship': ''}
self.update_data(nick.lower())
# Returns the ID for the given IRC nick
def get_id(self, nick):
return self.data[nick.lower()]['id']
# Returns the LEVEL for the given IRC nick
def get_level(self, nick):
return self.data[nick.lower()]['level']
# Returns the STRENGTH for the given IRC nick
def get_strength(self, nick):
return self.data[nick.lower()]['strength']
# Returns the RANK POINTS for the given IRC nick
def get_rank_points(self, nick):
return self.data[nick.lower()]['rank_points']
# Returns the CITIZENSHIP for the given IRC nick
def get_citizenship(self, nick):
return self.data[nick.lower()]['citizenship']
# Returns the IN-GAME NICK for the given IRC nick
def get_nick(self, nick):
return self.data[nick.lower()]['nick']
# Returns the RANK NAME for the given rank points
def calculate_rank_name(self, rank_points):
index = 0
for k in [key for key in self.rank_required_points.keys() if self.rank_required_points[key] < rank_points]:
if(self.rank_to_pos.index(k) > index):
index = self.rank_to_pos.index(k)
return self.rank_to_pos[index]
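# Worked example (hypothetical figures, assuming Python 3 division semantics):
# calculate_rank_name(1500) returns "Sergeant***", the highest rank whose
# required points (1400) fall below 1500. For calculate_damage below, the same
# 1500 rank points give index 12, so strength 500, weapon_power 50, level 50
# and bonus 1 yield trunc(((12/20)+0.3) * ((500/10)+40) * 1.5 * 1 * 1)
# = trunc(121.5) = 121 damage.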
# Computes DAMAGE from rank points, strength, weapon power, level and bonus
def calculate_damage(self, rank_points, strength, weapon_power, level, bonus):
index = 0
for k in [key for key in self.rank_required_points.keys() if self.rank_required_points[key] < rank_points]:
if(self.rank_to_pos.index(k) > index):
index = self.rank_to_pos.index(k)
return(math.trunc(((index / 20) + 0.3) * ((strength / 10) + 40) * (1 + (weapon_power / 100)) * (1.1 if level > 99 else 1) * bonus))
| CPedrini/TateTRES | erapi.py | Python | apache-2.0 | 11,009 |
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html lang="en-GB" xml:lang="en-GB" xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="content-type" content="text/html; charset=utf-8"/>
<title>amod</title>
<link rel="root" href=""/> <!-- for JS -->
<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/font-awesome/4.4.0/css/font-awesome.min.css">
<link rel="stylesheet" type="text/css" href="../../css/jquery-ui-redmond.css"/>
<link rel="stylesheet" type="text/css" href="../../css/style.css"/>
<link rel="stylesheet" type="text/css" href="../../css/style-vis.css"/>
<link rel="stylesheet" type="text/css" href="../../css/hint.css"/>
<script type="text/javascript" src="../../lib/ext/head.load.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/anchor-js/3.2.2/anchor.min.js"></script>
<script>document.addEventListener("DOMContentLoaded", function(event) {anchors.add();});</script>
<!-- Set up this custom Google search at https://cse.google.com/cse/business/settings?cx=001145188882102106025:dl1mehhcgbo -->
<!-- DZ 2021-01-22: I am temporarily hiding the search field to find out whether it slows down loading of the title page.
<script>
(function() {
var cx = '001145188882102106025:dl1mehhcgbo';
var gcse = document.createElement('script');
gcse.type = 'text/javascript';
gcse.async = true;
gcse.src = 'https://cse.google.com/cse.js?cx=' + cx;
var s = document.getElementsByTagName('script')[0];
s.parentNode.insertBefore(gcse, s);
})();
</script> -->
<!-- <link rel="shortcut icon" href="favicon.ico"/> -->
</head>
<body>
<div id="main" class="center">
<div id="hp-header">
<table width="100%"><tr><td width="50%">
<span class="header-text"><a href="http://universaldependencies.org/#language-uk">home</a></span>
<span class="header-text"><a href="https://github.com/universaldependencies/docs/edit/pages-source/_uk/dep/amod.md" target="#">edit page</a></span>
<span class="header-text"><a href="https://github.com/universaldependencies/docs/issues">issue tracker</a></span>
</td><td>
<gcse:search></gcse:search>
</td></tr></table>
</div>
<hr/>
<div class="v2complete">
This page pertains to UD version 2.
</div>
<div id="content">
<noscript>
<div id="noscript">
It appears that you have Javascript disabled.
Please consider enabling Javascript for this page to see the visualizations.
</div>
</noscript>
<!-- The content may include scripts and styles, hence we must load the shared libraries before the content. -->
<script type="text/javascript">
console.time('loading libraries');
var root = '../../'; // filled in by jekyll
head.js(
// External libraries
// DZ: Copied from embedding.html. I don't know which one is needed for what, so I'm currently keeping them all.
root + 'lib/ext/jquery.min.js',
root + 'lib/ext/jquery.svg.min.js',
root + 'lib/ext/jquery.svgdom.min.js',
root + 'lib/ext/jquery.timeago.js',
root + 'lib/ext/jquery-ui.min.js',
root + 'lib/ext/waypoints.min.js',
root + 'lib/ext/jquery.address.min.js'
);
</script>
<h2><code>amod</code>: adjectival modifier</h2>
<p>An adjectival modifier of a noun is any adjectival phrase that serves to modify the meaning of the noun.</p>
<p>Exception: if the modifying adjectival word is pronominal (i.e. tagged <a href="">uk-pos/DET</a>), the relation is <a href="">det</a> instead of <code class="language-plaintext highlighter-rouge">amod</code>.</p>
<pre><code class="language-sdparse">Ніна їсть зелене яблуко . \n Nina is-eating (a) green apple .
amod(яблуко, зелене)
amod(apple, green)
</code></pre>
<!--~~~ conllu-->
<!--1 Ніна Ніна NPROP NSN _ 2 nsubj _ _-->
<!--2 їсть їсти VERB VPR3s _ 0 root _ _-->
<!--3 зелене зелений ADJ ASA _ 4 amod _ _-->
<!--4 яблуко яблуко NOUN NSA _ 2 dobj _ _-->
<!--~~~-->
<pre><code class="language-sdparse">Ігор взяв десятитисячну позику . \n Igor has taken (a) ten-thousand loan .
amod(позику, десятитисячну)
amod(loan, ten-thousand)
</code></pre>
<pre><code class="language-sdparse">Перший бігун був швидкий . \n The-first racer was fast .
amod(бігун, Перший)
amod(racer, The-first)
nsubj(швидкий, бігун)
nsubj(fast, racer)
</code></pre>
<pre><code class="language-sdparse">Швидкий бігун був перший . \n The-fast racer was first .
amod(бігун, Швидкий)
amod(racer, The-fast)
nsubj(перший, бігун)
nsubj(first, racer)
</code></pre>
<!-- Interlanguage links updated Wed Nov 3 20:58:38 CET 2021 -->
<!-- "in other languages" links -->
<hr/>
amod in other languages:
[<a href="../../bej/dep/amod.html">bej</a>]
[<a href="../../bg/dep/amod.html">bg</a>]
[<a href="../../bm/dep/amod.html">bm</a>]
[<a href="../../cop/dep/amod.html">cop</a>]
[<a href="../../cs/dep/amod.html">cs</a>]
[<a href="../../de/dep/amod.html">de</a>]
[<a href="../../el/dep/amod.html">el</a>]
[<a href="../../en/dep/amod.html">en</a>]
[<a href="../../es/dep/amod.html">es</a>]
[<a href="../../et/dep/amod.html">et</a>]
[<a href="../../eu/dep/amod.html">eu</a>]
[<a href="../../fi/dep/amod.html">fi</a>]
[<a href="../../fr/dep/amod.html">fr</a>]
[<a href="../../fro/dep/amod.html">fro</a>]
[<a href="../../ga/dep/amod.html">ga</a>]
[<a href="../../gsw/dep/amod.html">gsw</a>]
[<a href="../../hy/dep/amod.html">hy</a>]
[<a href="../../it/dep/amod.html">it</a>]
[<a href="../../ja/dep/amod.html">ja</a>]
[<a href="../../kk/dep/amod.html">kk</a>]
[<a href="../../no/dep/amod.html">no</a>]
[<a href="../../pcm/dep/amod.html">pcm</a>]
[<a href="../../pt/dep/amod.html">pt</a>]
[<a href="../../ro/dep/amod.html">ro</a>]
[<a href="../../ru/dep/amod.html">ru</a>]
[<a href="../../sv/dep/amod.html">sv</a>]
[<a href="../../swl/dep/amod.html">swl</a>]
[<a href="../../tr/dep/amod.html">tr</a>]
[<a href="../../u/dep/amod.html">u</a>]
[<a href="../../uk/dep/amod.html">uk</a>]
[<a href="../../urj/dep/amod.html">urj</a>]
[<a href="../../vi/dep/amod.html">vi</a>]
[<a href="../../yue/dep/amod.html">yue</a>]
[<a href="../../zh/dep/amod.html">zh</a>]
</div>
<!-- support for embedded visualizations -->
<script type="text/javascript">
var root = '../../'; // filled in by jekyll
head.js(
// We assume that external libraries such as jquery.min.js have already been loaded outside!
// (See _layouts/base.html.)
// brat helper modules
root + 'lib/brat/configuration.js',
root + 'lib/brat/util.js',
root + 'lib/brat/annotation_log.js',
root + 'lib/ext/webfont.js',
// brat modules
root + 'lib/brat/dispatcher.js',
root + 'lib/brat/url_monitor.js',
root + 'lib/brat/visualizer.js',
// embedding configuration
root + 'lib/local/config.js',
// project-specific collection data
root + 'lib/local/collections.js',
// Annodoc
root + 'lib/annodoc/annodoc.js',
// NOTE: non-local libraries
'https://spyysalo.github.io/conllu.js/conllu.js'
);
var webFontURLs = [
// root + 'static/fonts/Astloch-Bold.ttf',
root + 'static/fonts/PT_Sans-Caption-Web-Regular.ttf',
root + 'static/fonts/Liberation_Sans-Regular.ttf'
];
var setupTimeago = function() {
jQuery("time.timeago").timeago();
};
head.ready(function() {
setupTimeago();
// mark current collection (filled in by Jekyll)
Collections.listing['_current'] = 'uk';
// perform all embedding and support functions
Annodoc.activate(Config.bratCollData, Collections.listing);
});
</script>
<!-- google analytics -->
<script>
(function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
(i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
})(window,document,'script','//www.google-analytics.com/analytics.js','ga');
ga('create', 'UA-55233688-1', 'auto');
ga('send', 'pageview');
</script>
<div id="footer">
<p class="footer-text">© 2014–2021
<a href="http://universaldependencies.org/introduction.html#contributors" style="color:gray">Universal Dependencies contributors</a>.
      Site powered by <a href="http://spyysalo.github.io/annodoc" style="color:gray">Annodoc</a> and <a href="http://brat.nlplab.org/" style="color:gray">brat</a>.</p>
</div>
</div>
</body>
</html>
| UniversalDependencies/universaldependencies.github.io | uk/dep/amod.html | HTML | apache-2.0 | 9,143 |
# Espeletiopsis cristalinensis (Cuatrec.) Cuatrec. SPECIES
#### Status
ACCEPTED
#### According to
International Plant Names Index
#### Published in
null
#### Original name
null
### Remarks
null | mdoering/backbone | life/Plantae/Magnoliophyta/Magnoliopsida/Asterales/Asteraceae/Espeletiopsis cristalinensis/README.md | Markdown | apache-2.0 | 198 |
/* Slideshow container */
.slideshow-container {
max-width: 1000px;
position: relative;
margin: auto;
}
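/* A sketch of the markup these rules assume (the structure below is an
   assumption, not part of this stylesheet):
   <div class="slideshow-container">
     <div class="fade">
       <div class="numbertext">1 / 3</div>
       <img src="slide1.jpg">
       <div class="text">Caption</div>
     </div>
     <a class="prev">&#10094;</a>
     <a class="next">&#10095;</a>
   </div>
   <div style="text-align:center"><span class="dot"></span> ...</div>
*/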
/* Next & previous buttons */
.prev, .next {
cursor: pointer;
position: absolute;
top: 50%;
width: auto;
margin-top: -22px;
padding: 16px;
color: white;
font-weight: bold;
font-size: 18px;
transition: 0.6s ease;
border-radius: 0 3px 3px 0;
}
/* Position the "next button" to the right */
.next {
right: 0;
border-radius: 3px 0 0 3px;
}
/* On hover, add a slightly transparent black background */
.prev:hover, .next:hover {
background-color: rgba(0,0,0,0.8);
}
/* Caption text */
.text {
color: #f2f2f2;
font-size: 15px;
padding: 8px 12px;
position: absolute;
bottom: 8px;
width: 100%;
text-align: center;
}
/* Number text (1/3 etc) */
.numbertext {
color: #f2f2f2;
font-size: 12px;
padding: 8px 12px;
position: absolute;
top: 0;
}
/* The dots/bullets/indicators */
.dot {
  cursor: pointer;
height: 13px;
width: 13px;
margin: 0 2px;
background-color: #bbb;
border-radius: 50%;
display: inline-block;
transition: background-color 0.6s ease;
}
.active, .dot:hover {
background-color: #717171;
}
/* Fading animation */
.fade {
-webkit-animation-name: fade;
-webkit-animation-duration: 1.5s;
animation-name: fade;
animation-duration: 1.5s;
}
@-webkit-keyframes fade {
from {opacity: .4}
to {opacity: 1}
}
@keyframes fade {
from {opacity: .4}
to {opacity: 1}
} | ruchiishahh/tinoupcycling | css/slideshow.css | CSS | apache-2.0 | 1,475 |
package org.apache.solr.cloud;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import org.apache.http.params.CoreConnectionPNames;
import org.apache.solr.client.solrj.SolrQuery;
import org.apache.solr.client.solrj.SolrRequest;
import org.apache.solr.client.solrj.SolrServer;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.impl.CloudSolrServer;
import org.apache.solr.client.solrj.impl.HttpSolrServer;
import org.apache.solr.client.solrj.request.QueryRequest;
import org.apache.solr.client.solrj.response.QueryResponse;
import org.apache.solr.common.SolrDocument;
import org.apache.solr.common.cloud.ClusterState;
import org.apache.solr.common.cloud.DocRouter;
import org.apache.solr.common.cloud.Slice;
import org.apache.solr.common.cloud.ZkCoreNodeProps;
import org.apache.solr.common.cloud.ZkStateReader;
import org.apache.solr.common.params.CollectionParams;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.util.Hash;
import org.apache.solr.handler.admin.CollectionsHandler;
import org.apache.solr.update.DirectUpdateHandler2;
import org.apache.zookeeper.KeeperException;
import org.junit.After;
import org.junit.Before;
import java.io.IOException;
import java.net.MalformedURLException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
public class ShardSplitTest extends BasicDistributedZkTest {
public static final String SHARD1_0 = SHARD1 + "_0";
public static final String SHARD1_1 = SHARD1 + "_1";
@Override
@Before
public void setUp() throws Exception {
super.setUp();
System.setProperty("numShards", Integer.toString(sliceCount));
System.setProperty("solr.xml.persist", "true");
}
@Override
@After
public void tearDown() throws Exception {
super.tearDown();
if (VERBOSE || printLayoutOnTearDown) {
super.printLayout();
}
if (controlClient != null) {
controlClient.shutdown();
}
if (cloudClient != null) {
cloudClient.shutdown();
}
if (controlClientCloud != null) {
controlClientCloud.shutdown();
}
System.clearProperty("zkHost");
System.clearProperty("numShards");
System.clearProperty("solr.xml.persist");
// insurance
DirectUpdateHandler2.commitOnClose = true;
}
@Override
public void doTest() throws Exception {
waitForThingsToLevelOut(15);
ClusterState clusterState = cloudClient.getZkStateReader().getClusterState();
DocRouter router = clusterState.getCollection(AbstractDistribZkTestBase.DEFAULT_COLLECTION).getRouter();
Slice shard1 = clusterState.getSlice(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1);
DocRouter.Range shard1Range = shard1.getRange() != null ? shard1.getRange() : router.fullRange();
final List<DocRouter.Range> ranges = router.partitionRange(2, shard1Range);
final int[] docCounts = new int[ranges.size()];
int numReplicas = shard1.getReplicas().size();
del("*:*");
for (int id = 0; id < 100; id++) {
indexAndUpdateCount(ranges, docCounts, id);
}
commit();
Thread indexThread = new Thread() {
@Override
public void run() {
        final int maxId = atLeast(401); // evaluate the randomized bound once, not on every iteration
        for (int id = 101; id < maxId; id++) {
try {
indexAndUpdateCount(ranges, docCounts, id);
Thread.sleep(atLeast(25));
} catch (Exception e) {
log.error("Exception while adding doc", e);
}
}
}
};
indexThread.start();
splitShard(SHARD1);
log.info("Layout after split: \n");
printLayout();
indexThread.join();
commit();
checkDocCountsAndShardStates(docCounts, numReplicas);
// todo can't call waitForThingsToLevelOut because it looks for jettys of all shards
// and the new sub-shards don't have any.
waitForRecoveriesToFinish(true);
//waitForThingsToLevelOut(15);
}
protected void checkDocCountsAndShardStates(int[] docCounts, int numReplicas) throws SolrServerException, KeeperException, InterruptedException {
SolrQuery query = new SolrQuery("*:*").setRows(1000).setFields("id", "_version_");
query.set("distrib", false);
ZkCoreNodeProps shard1_0 = getLeaderUrlFromZk(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1_0);
HttpSolrServer shard1_0Server = new HttpSolrServer(shard1_0.getCoreUrl());
QueryResponse response = shard1_0Server.query(query);
long shard10Count = response.getResults().getNumFound();
ZkCoreNodeProps shard1_1 = getLeaderUrlFromZk(AbstractDistribZkTestBase.DEFAULT_COLLECTION, SHARD1_1);
HttpSolrServer shard1_1Server = new HttpSolrServer(shard1_1.getCoreUrl());
QueryResponse response2 = shard1_1Server.query(query);
long shard11Count = response2.getResults().getNumFound();
logDebugHelp(docCounts, response, shard10Count, response2, shard11Count);
assertEquals("Wrong doc count on shard1_0", docCounts[0], shard10Count);
assertEquals("Wrong doc count on shard1_1", docCounts[1], shard11Count);
ClusterState clusterState = null;
Slice slice1_0 = null, slice1_1 = null;
int i = 0;
for (i = 0; i < 10; i++) {
ZkStateReader zkStateReader = cloudClient.getZkStateReader();
zkStateReader.updateClusterState(true);
clusterState = zkStateReader.getClusterState();
slice1_0 = clusterState.getSlice(AbstractDistribZkTestBase.DEFAULT_COLLECTION, "shard1_0");
slice1_1 = clusterState.getSlice(AbstractDistribZkTestBase.DEFAULT_COLLECTION, "shard1_1");
if (Slice.ACTIVE.equals(slice1_0.getState()) && Slice.ACTIVE.equals(slice1_1.getState()))
break;
Thread.sleep(500);
}
log.info("ShardSplitTest waited for {} ms for shard state to be set to active", i * 500);
assertNotNull("Cluster state does not contain shard1_0", slice1_0);
assertNotNull("Cluster state does not contain shard1_0", slice1_1);
assertEquals("shard1_0 is not active", Slice.ACTIVE, slice1_0.getState());
assertEquals("shard1_1 is not active", Slice.ACTIVE, slice1_1.getState());
assertEquals("Wrong number of replicas created for shard1_0", numReplicas, slice1_0.getReplicas().size());
assertEquals("Wrong number of replicas created for shard1_1", numReplicas, slice1_1.getReplicas().size());
}
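  /**
   * Issues a synchronous SPLITSHARD request to the Collections API on the node
   * hosting shard1. The socket timeout is widened to 5x the default ZK timeout,
   * presumably because the call only returns once the sub-shards are created.
   */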
protected void splitShard(String shardId) throws SolrServerException, IOException {
ModifiableSolrParams params = new ModifiableSolrParams();
params.set("action", CollectionParams.CollectionAction.SPLITSHARD.toString());
params.set("collection", "collection1");
params.set("shard", shardId);
SolrRequest request = new QueryRequest(params);
request.setPath("/admin/collections");
String baseUrl = ((HttpSolrServer) shardToJetty.get(SHARD1).get(0).client.solrClient)
.getBaseURL();
baseUrl = baseUrl.substring(0, baseUrl.length() - "collection1".length());
HttpSolrServer baseServer = new HttpSolrServer(baseUrl);
baseServer.setConnectionTimeout(15000);
baseServer.setSoTimeout((int) (CollectionsHandler.DEFAULT_ZK_TIMEOUT * 5));
baseServer.request(request);
}
protected void indexAndUpdateCount(List<DocRouter.Range> ranges, int[] docCounts, int id) throws Exception {
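    // Index the doc, then predict locally which sub-shard it should land on:
    // murmurhash3 (x86, 32-bit) over the id bytes mirrors what the default
    // hash-based router is expected to do, so docCounts[i] tracks the
    // expected document count of each sub-shard range.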
indexr("id", id);
// todo - hook in custom hashing
byte[] bytes = String.valueOf(id).getBytes("UTF-8");
int hash = Hash.murmurhash3_x86_32(bytes, 0, bytes.length, 0);
for (int i = 0; i < ranges.size(); i++) {
DocRouter.Range range = ranges.get(i);
if (range.includes(hash))
docCounts[i]++;
}
}
protected void logDebugHelp(int[] docCounts, QueryResponse response, long shard10Count, QueryResponse response2, long shard11Count) {
for (int i = 0; i < docCounts.length; i++) {
int docCount = docCounts[i];
log.info("Expected docCount for shard1_{} = {}", i, docCount);
}
log.info("Actual docCount for shard1_0 = {}", shard10Count);
log.info("Actual docCount for shard1_1 = {}", shard11Count);
Map<String, String> idVsVersion = new HashMap<String, String>();
Map<String, SolrDocument> shard10Docs = new HashMap<String, SolrDocument>();
Map<String, SolrDocument> shard11Docs = new HashMap<String, SolrDocument>();
for (int i = 0; i < response.getResults().size(); i++) {
SolrDocument document = response.getResults().get(i);
idVsVersion.put(document.getFieldValue("id").toString(), document.getFieldValue("_version_").toString());
SolrDocument old = shard10Docs.put(document.getFieldValue("id").toString(), document);
if (old != null) {
log.error("EXTRA: ID: " + document.getFieldValue("id") + " on shard1_0. Old version: " + old.getFieldValue("_version_") + " new version: " + document.getFieldValue("_version_"));
}
}
for (int i = 0; i < response2.getResults().size(); i++) {
SolrDocument document = response2.getResults().get(i);
String value = document.getFieldValue("id").toString();
String version = idVsVersion.get(value);
if (version != null) {
log.error("DUPLICATE: ID: " + value + " , shard1_0Version: " + version + " shard1_1Version:" + document.getFieldValue("_version_"));
}
SolrDocument old = shard11Docs.put(document.getFieldValue("id").toString(), document);
if (old != null) {
log.error("EXTRA: ID: " + document.getFieldValue("id") + " on shard1_1. Old version: " + old.getFieldValue("_version_") + " new version: " + document.getFieldValue("_version_"));
}
}
}
@Override
protected SolrServer createNewSolrServer(String collection, String baseUrl) {
HttpSolrServer server = (HttpSolrServer) super.createNewSolrServer(collection, baseUrl);
server.setSoTimeout(5 * 60 * 1000);
return server;
}
@Override
protected SolrServer createNewSolrServer(int port) {
HttpSolrServer server = (HttpSolrServer) super.createNewSolrServer(port);
server.setSoTimeout(5 * 60 * 1000);
return server;
}
@Override
protected CloudSolrServer createCloudClient(String defaultCollection) throws MalformedURLException {
CloudSolrServer client = super.createCloudClient(defaultCollection);
client.getLbServer().getHttpClient().getParams().setParameter(CoreConnectionPNames.SO_TIMEOUT, 5 * 60 * 1000);
return client;
}
}
| halentest/solr | solr/core/src/test/org/apache/solr/cloud/ShardSplitTest.java | Java | apache-2.0 | 11,050 |
# Phoma amaranthi Brunaud SPECIES
#### Status
ACCEPTED
#### According to
Index Fungorum
#### Published in
Bull. Torrey bot. Club 20: 251 (1893)
#### Original name
Phoma amaranthi Brunaud
### Remarks
null | mdoering/backbone | life/Fungi/Ascomycota/Dothideomycetes/Pleosporales/Phoma/Phoma amaranthi/README.md | Markdown | apache-2.0 | 208 |
package org.zstack.header.identity;
import org.zstack.header.message.APICreateMessage;
import org.zstack.header.message.APIMessage;
import org.zstack.header.message.APIParam;
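/**
 * API message that creates an identity policy owned by the calling session's
 * account. 'name' and 'policyData' are mandatory (@APIParam); 'description'
 * is optional.
 */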
@NeedRoles(roles = {IdentityRoles.CREATE_POLICY_ROLE})
public class APICreatePolicyMsg extends APICreateMessage implements AccountMessage {
@APIParam
private String name;
private String description;
@APIParam
private String policyData;
@Override
public String getAccountUuid() {
return this.getSession().getAccountUuid();
}
public String getPolicyData() {
return policyData;
}
public void setPolicyData(String policyData) {
this.policyData = policyData;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public String getDescription() {
return description;
}
public void setDescription(String description) {
this.description = description;
}
}
| SoftwareKing/zstack | header/src/main/java/org/zstack/header/identity/APICreatePolicyMsg.java | Java | apache-2.0 | 1,059 |
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Xml.Serialization;
namespace SAM.DTO
{
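    // With XmlSerializer this DTO serializes roughly as (sketch; element order
    // assumed to follow declaration order):
    // <user><id>...</id><name>...</name><avatar_url>...</avatar_url></user>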
[XmlType(TypeName = "user")]
public class User
{
public string id { get; set; }
public string name { get; set; }
public string avatar_url { get; set; }
}
[XmlType(TypeName = "users")]
public class UserList : SamList<User>
{
}
}
| SAMdesk/sam-dotnet | src/SAM/DTO/User.cs | C# | apache-2.0 | 430 |
// Copyright 2017 The Bazel Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.devtools.build.lib.profiler.memory;
import com.google.common.base.Objects;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Iterables;
import com.google.common.collect.MapMaker;
import com.google.devtools.build.lib.concurrent.ThreadSafety.ConditionallyThreadCompatible;
import com.google.devtools.build.lib.concurrent.ThreadSafety.ThreadSafe;
import com.google.devtools.build.lib.packages.AspectClass;
import com.google.devtools.build.lib.packages.RuleClass;
import com.google.devtools.build.lib.packages.RuleFunction;
import com.google.devtools.build.lib.syntax.Debug;
import com.google.devtools.build.lib.syntax.Location;
import com.google.devtools.build.lib.syntax.StarlarkCallable;
import com.google.devtools.build.lib.syntax.StarlarkThread;
import com.google.monitoring.runtime.instrumentation.Sampler;
import com.google.perftools.profiles.ProfileProto.Function;
import com.google.perftools.profiles.ProfileProto.Line;
import com.google.perftools.profiles.ProfileProto.Profile;
import com.google.perftools.profiles.ProfileProto.Sample;
import com.google.perftools.profiles.ProfileProto.ValueType;
import java.io.FileOutputStream;
import java.io.IOException;
import java.time.Instant;
import java.util.HashMap;
import java.util.Map;
import java.util.Random;
import java.util.zip.GZIPOutputStream;
import javax.annotation.Nullable;
/** Tracks allocations for memory reporting. */
@ConditionallyThreadCompatible
@SuppressWarnings("ThreadLocalUsage") // the AllocationTracker is effectively a global
public final class AllocationTracker implements Sampler, Debug.ThreadHook {
// A mapping from Java thread to StarlarkThread.
// Used to effect a hidden StarlarkThread parameter to sampleAllocation.
// TODO(adonovan): opt: merge the three different ThreadLocals in use here.
private final ThreadLocal<StarlarkThread> starlarkThread = new ThreadLocal<>();
@Override
public void onPushFirst(StarlarkThread thread) {
starlarkThread.set(thread);
}
@Override
public void onPopLast(StarlarkThread thread) {
starlarkThread.remove();
}
private static class AllocationSample {
@Nullable final RuleClass ruleClass; // Current rule being analysed, if any
@Nullable final AspectClass aspectClass; // Current aspect being analysed, if any
final ImmutableList<Frame> callstack; // Starlark callstack, if any
final long bytes;
AllocationSample(
@Nullable RuleClass ruleClass,
@Nullable AspectClass aspectClass,
ImmutableList<Frame> callstack,
long bytes) {
this.ruleClass = ruleClass;
this.aspectClass = aspectClass;
this.callstack = callstack;
this.bytes = bytes;
}
}
private static class Frame {
final String name;
final Location loc;
@Nullable final RuleFunction ruleFunction;
Frame(String name, Location loc, @Nullable RuleFunction ruleFunction) {
this.name = name;
this.loc = loc;
this.ruleFunction = ruleFunction;
}
}
private final Map<Object, AllocationSample> allocations = new MapMaker().weakKeys().makeMap();
private final int samplePeriod;
private final int sampleVariance;
private boolean enabled = true;
/**
* Cheap wrapper class for a long. Avoids having to do two thread-local lookups per allocation.
*/
private static final class LongValue {
long value;
}
private final ThreadLocal<LongValue> currentSampleBytes = ThreadLocal.withInitial(LongValue::new);
private final ThreadLocal<Long> nextSampleBytes = ThreadLocal.withInitial(this::getNextSample);
private final Random random = new Random();
AllocationTracker(int samplePeriod, int variance) {
this.samplePeriod = samplePeriod;
this.sampleVariance = variance;
}
// Called by instrumentation.recordAllocation, which is in turn called
// by an instrumented version of the application assembled on the fly
// by instrumentation.AllocationInstrumenter.
// The instrumenter inserts a call to recordAllocation after every
// memory allocation instruction in the original class.
//
// This function runs within 'new', so is not supposed to allocate memory;
// see Sampler interface. In fact it allocates in nearly a dozen places.
// TODO(adonovan): suppress reentrant calls by setting a thread-local flag.
@Override
@ThreadSafe
public void sampleAllocation(int count, String desc, Object newObj, long size) {
if (!enabled) {
return;
}
@Nullable StarlarkThread thread = starlarkThread.get();
// Calling Debug.getCallStack is a dubious operation here.
// First it allocates memory, which breaks the Sampler contract.
// Second, the allocation could in principle occur while the thread's
// representation invariants are temporarily broken (that is, during
// the call to ArrayList.add when pushing a new stack frame).
// For now at least, the allocation done by ArrayList.add occurs before
// the representation of the ArrayList is changed, so it is safe,
// but this is a fragile assumption.
ImmutableList<Debug.Frame> callstack =
thread != null ? Debug.getCallStack(thread) : ImmutableList.of();
RuleClass ruleClass = CurrentRuleTracker.getRule();
AspectClass aspectClass = CurrentRuleTracker.getAspect();
// Should we bother sampling?
if (callstack.isEmpty() && ruleClass == null && aspectClass == null) {
return;
}
// Convert the thread's stack right away to our internal form.
// It is not safe to inspect Debug.Frame references once the thread resumes,
// and keeping StarlarkCallable values live defeats garbage collection.
ImmutableList.Builder<Frame> frames = ImmutableList.builderWithExpectedSize(callstack.size());
for (Debug.Frame fr : callstack) {
// The frame's PC location is currently not updated at every step,
// only at function calls, so the leaf frame's line number may be
// slightly off; see the tests.
// TODO(b/149023294): remove comment when we move to a compiled representation.
StarlarkCallable fn = fr.getFunction();
frames.add(
new Frame(
fn.getName(),
fr.getLocation(),
fn instanceof RuleFunction ? (RuleFunction) fn : null));
}
// If we start getting stack overflows here, it's because the memory sampling
// implementation has changed to call back into the sampling method immediately on
// every allocation. Since thread locals can allocate, this can in this case lead
// to infinite recursion. This method will then need to be rewritten to not
// allocate, or at least not allocate to obtain its sample counters.
LongValue bytesValue = currentSampleBytes.get();
long bytes = bytesValue.value + size;
if (bytes < nextSampleBytes.get()) {
bytesValue.value = bytes;
return;
}
bytesValue.value = 0;
nextSampleBytes.set(getNextSample());
allocations.put(newObj, new AllocationSample(ruleClass, aspectClass, frames.build(), bytes));
}
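  /**
   * Returns the byte threshold for the next sample: the configured period plus
   * uniform noise in [-sampleVariance, sampleVariance), presumably so sampling
   * does not phase-lock with regular allocation patterns.
   */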
private long getNextSample() {
return (long) samplePeriod
+ (sampleVariance > 0 ? (random.nextInt(sampleVariance * 2) - sampleVariance) : 0);
}
/** A pair of rule/aspect name and the bytes it consumes. */
public static final class RuleBytes {
private final String name;
private long bytes;
public RuleBytes(String name) {
this.name = name;
}
    /** The total number of bytes occupied by this rule or aspect class. */
public long getBytes() {
return bytes;
}
public RuleBytes addBytes(long bytes) {
this.bytes += bytes;
return this;
}
@Override
public String toString() {
return String.format("RuleBytes(%s, %d)", name, bytes);
}
@Override
public boolean equals(Object o) {
if (this == o) {
return true;
}
if (o == null || getClass() != o.getClass()) {
return false;
}
RuleBytes ruleBytes = (RuleBytes) o;
return bytes == ruleBytes.bytes && Objects.equal(name, ruleBytes.name);
}
@Override
public int hashCode() {
return Objects.hashCode(name, bytes);
}
}
// If the topmost stack entry is a call to a rule function, returns it.
@Nullable
private static RuleFunction getRule(AllocationSample sample) {
Frame top = Iterables.getLast(sample.callstack, null);
return top != null ? top.ruleFunction : null;
}
/**
   * Fills the given maps with the total memory consumption of rules and aspects,
   * keyed by {@link RuleClass#getKey} and {@link AspectClass#getKey} respectively.
*/
public void getRuleMemoryConsumption(
Map<String, RuleBytes> rules, Map<String, RuleBytes> aspects) {
// Make sure we don't track our own allocations
enabled = false;
System.gc();
// Get loading phase memory for rules.
for (AllocationSample sample : allocations.values()) {
RuleFunction rule = getRule(sample);
if (rule != null) {
RuleClass ruleClass = rule.getRuleClass();
String key = ruleClass.getKey();
RuleBytes ruleBytes = rules.computeIfAbsent(key, k -> new RuleBytes(ruleClass.getName()));
rules.put(key, ruleBytes.addBytes(sample.bytes));
}
}
// Get analysis phase memory for rules and aspects
for (AllocationSample sample : allocations.values()) {
if (sample.ruleClass != null) {
String key = sample.ruleClass.getKey();
RuleBytes ruleBytes =
rules.computeIfAbsent(key, k -> new RuleBytes(sample.ruleClass.getName()));
rules.put(key, ruleBytes.addBytes(sample.bytes));
}
if (sample.aspectClass != null) {
String key = sample.aspectClass.getKey();
RuleBytes ruleBytes =
aspects.computeIfAbsent(key, k -> new RuleBytes(sample.aspectClass.getName()));
aspects.put(key, ruleBytes.addBytes(sample.bytes));
}
}
enabled = true;
}
/** Dumps all Starlark analysis time allocations to a pprof-compatible file. */
public void dumpSkylarkAllocations(String path) throws IOException {
// Make sure we don't track our own allocations
enabled = false;
System.gc();
Profile profile = buildMemoryProfile();
try (GZIPOutputStream outputStream = new GZIPOutputStream(new FileOutputStream(path))) {
profile.writeTo(outputStream);
outputStream.finish();
}
enabled = true;
}
Profile buildMemoryProfile() {
Profile.Builder profile = Profile.newBuilder();
StringTable stringTable = new StringTable(profile);
FunctionTable functionTable = new FunctionTable(profile, stringTable);
LocationTable locationTable = new LocationTable(profile, functionTable);
profile.addSampleType(
ValueType.newBuilder()
.setType(stringTable.get("memory"))
.setUnit(stringTable.get("bytes"))
.build());
for (AllocationSample sample : allocations.values()) {
// Skip empty callstacks
if (sample.callstack.isEmpty()) {
continue;
}
Sample.Builder b = Sample.newBuilder().addValue(sample.bytes);
for (Frame fr : sample.callstack.reverse()) {
b.addLocationId(locationTable.get(fr.loc.file(), fr.name, fr.loc.line()));
}
profile.addSample(b.build());
}
profile.setTimeNanos(Instant.now().getEpochSecond() * 1000000000);
return profile.build();
}
private static class StringTable {
final Profile.Builder profile;
final Map<String, Long> table = new HashMap<>();
long index = 0;
StringTable(Profile.Builder profile) {
this.profile = profile;
get(""); // 0 is reserved for the empty string
}
long get(String str) {
return table.computeIfAbsent(
str,
key -> {
profile.addStringTable(key);
return index++;
});
}
}
private static class FunctionTable {
final Profile.Builder profile;
final StringTable stringTable;
final Map<String, Long> table = new HashMap<>();
long index = 1; // 0 is reserved
FunctionTable(Profile.Builder profile, StringTable stringTable) {
this.profile = profile;
this.stringTable = stringTable;
}
long get(String file, String function) {
return table.computeIfAbsent(
file + "#" + function,
key -> {
Function fn =
Function.newBuilder()
.setId(index)
.setFilename(stringTable.get(file))
.setName(stringTable.get(function))
.build();
profile.addFunction(fn);
return index++;
});
}
}
private static class LocationTable {
final Profile.Builder profile;
final FunctionTable functionTable;
final Map<String, Long> table = new HashMap<>();
long index = 1; // 0 is reserved
LocationTable(Profile.Builder profile, FunctionTable functionTable) {
this.profile = profile;
this.functionTable = functionTable;
}
long get(String file, String function, long line) {
return table.computeIfAbsent(
file + "#" + function + "#" + line,
key -> {
com.google.perftools.profiles.ProfileProto.Location location =
com.google.perftools.profiles.ProfileProto.Location.newBuilder()
.setId(index)
.addLine(
Line.newBuilder()
.setFunctionId(functionTable.get(file, function))
.setLine(line)
.build())
.build();
profile.addLocation(location);
return index++;
});
}
}
}
| akira-baruah/bazel | src/main/java/com/google/devtools/build/lib/profiler/memory/AllocationTracker.java | Java | apache-2.0 | 14,364 |
/**
* @file ff_lpc546xx.c
 * @brief board ID for the L-TEK FF-LPC546XX (LPC546xx) board
*
* DAPLink Interface Firmware
* Copyright (c) 2009-2019, ARM Limited, All Rights Reserved
* SPDX-License-Identifier: Apache-2.0
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "target_family.h"
#include "target_board.h"
const board_info_t g_board_info = {
.info_version = kBoardInfoVersion,
.board_id = "8081",
.family_id = kStub_HWReset_FamilyID,
.daplink_url_name = "PRODINFOHTM",
.daplink_drive_name = "FF-LPC546XX",
.daplink_target_url = "https://os.mbed.com/platforms/L-TEK-FF-LPC546XX",
.target_cfg = &target_device,
};
| google/DAPLink-port | source/board/ff_lpc546xx.c | C | apache-2.0 | 1,177 |
=head1 LICENSE
See the NOTICE file distributed with this work for additional information
regarding copyright ownership.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=head1 NAME
Bio::EnsEMBL::Compara::PipeConfig::EPO_conf
=head1 SYNOPSIS
init_pipeline.pl Bio::EnsEMBL::Compara::PipeConfig::EPO_conf -host mysql-ens-compara-prod-X -port XXXX \
-division $COMPARA_DIV -species_set_name <species_set_name>
=head1 DESCRIPTION
This PipeConfig file gives defaults for mapping anchors (currently with
exonerate) to a set of target genomes (dumped text files).
=cut
package Bio::EnsEMBL::Compara::PipeConfig::EPO_conf;
use strict;
use warnings;
use Bio::EnsEMBL::Hive::Version 2.4;
use Bio::EnsEMBL::Hive::PipeConfig::HiveGeneric_conf; # For INPUT_PLUS
use Bio::EnsEMBL::Compara::PipeConfig::Parts::EPOMapAnchors;
use Bio::EnsEMBL::Compara::PipeConfig::Parts::EPOAlignment;
use base ('Bio::EnsEMBL::Compara::PipeConfig::ComparaGeneric_conf');
sub default_options {
my ($self) = @_;
return {
%{$self->SUPER::default_options},
'pipeline_name' => $self->o('species_set_name').'_epo_'.$self->o('rel_with_suffix'),
'method_type' => 'EPO',
# Databases
'compara_master' => 'compara_master',
# Database containing the anchors for mapping
'compara_anchor_db' => $self->o('species_set_name') . '_epo_anchors',
# The previous database to reuse the anchor mappings
'reuse_db' => $self->o('species_set_name') . '_epo_prev',
# The ancestral_db is created on the same server as the pipeline_db
'ancestral_db' => {
-driver => $self->o('pipeline_db', '-driver'),
-host => $self->o('pipeline_db', '-host'),
-port => $self->o('pipeline_db', '-port'),
-species => $self->o('ancestral_sequences_name'),
-user => $self->o('pipeline_db', '-user'),
-pass => $self->o('pipeline_db', '-pass'),
-dbname => $self->o('dbowner').'_'.$self->o('species_set_name').'_ancestral_core_'.$self->o('rel_with_suffix'),
},
'ancestral_sequences_name' => 'ancestral_sequences',
'ancestral_sequences_display_name' => 'Ancestral sequences',
# Executable parameters
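        # exonerate options, as understood from the flags: keep the 11 best
        # alignments per anchor, local affine model, no gapped extension, no
        # softmasking, and only report hits scoring >= 75% of the maximal score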
'mapping_params' => { bestn=>11, gappedextension=>"no", softmasktarget=>"no", percent=>75, showalignment=>"no", model=>"affine:local", },
'enredo_params' => ' --min-score 0 --max-gap-length 200000 --max-path-dissimilarity 4 --min-length 10000 --min-regions 2 --min-anchors 3 --max-ratio 3 --simplify-graph 7 --bridges -o ',
'gerp_window_sizes' => [1,10,100,500], #gerp window sizes
# Dump directory
'work_dir' => $self->o('pipeline_dir'),
'enredo_output_file' => $self->o('work_dir').'/enredo_output.txt',
'bed_dir' => $self->o('work_dir').'/bed',
'feature_dir' => $self->o('work_dir').'/feature_dump',
'enredo_mapping_file' => $self->o('work_dir').'/enredo_input.txt',
'bl2seq_dump_dir' => $self->o('work_dir').'/bl2seq', # location for dumping sequences to determine strand (for bl2seq)
'bl2seq_file_stem' => '#bl2seq_dump_dir#/bl2seq',
'output_dir' => '#feature_dir#', # alias
# Options
#skip this module if set to 1
'skip_multiplealigner_stats' => 0,
# dont dump the MT sequence for mapping
'only_nuclear_genome' => 1,
# add MT dnafrags separately (1) or not (0) to the dnafrag_region table
'add_non_nuclear_alignments' => 1,
# batch size of anchor sequences to map
'anchor_batch_size' => 1000,
# Usually set to 0 because we run Gerp on the EPO2X alignment instead
'run_gerp' => 0,
# Capacities
'low_capacity' => 10,
'map_anchors_batch_size' => 5,
'map_anchors_capacity' => 2000,
'trim_anchor_align_batch_size' => 20,
'trim_anchor_align_capacity' => 500,
};
}
sub pipeline_create_commands {
my ($self) = @_;
return [
@{$self->SUPER::pipeline_create_commands}, # inheriting database and hive tables' creation
$self->pipeline_create_commands_rm_mkdir(['work_dir', 'bed_dir', 'feature_dir', 'bl2seq_dump_dir']),
];
}
sub pipeline_wide_parameters {
my $self = shift @_;
return {
%{$self->SUPER::pipeline_wide_parameters},
# directories
'work_dir' => $self->o('work_dir'),
'feature_dir' => $self->o('feature_dir'),
'enredo_output_file' => $self->o('enredo_output_file'),
'bed_dir' => $self->o('bed_dir'),
'genome_dumps_dir' => $self->o('genome_dumps_dir'),
'enredo_mapping_file' => $self->o('enredo_mapping_file'),
'bl2seq_dump_dir' => $self->o('bl2seq_dump_dir'),
'bl2seq_file_stem' => $self->o('bl2seq_file_stem'),
# databases
'compara_anchor_db' => $self->o('compara_anchor_db'),
'master_db' => $self->o('compara_master'),
'reuse_db' => $self->o('reuse_db'),
'ancestral_db' => $self->o('ancestral_db'),
# options
'run_gerp' => $self->o('run_gerp'),
};
}
sub core_pipeline_analyses {
my ($self) = @_;
return [
{ -logic_name => 'load_mlss_id',
-module => 'Bio::EnsEMBL::Compara::RunnableDB::LoadMLSSids',
-parameters => {
'method_type' => $self->o('method_type'),
'species_set_name' => $self->o('species_set_name'),
'release' => $self->o('ensembl_release'),
},
-input_ids => [{}],
-flow_into => {
'1->A' => [ 'copy_table_factory', 'set_internal_ids', 'drop_ancestral_db' ],
'A->1' => 'reuse_anchor_align_factory',
}
},
@{ Bio::EnsEMBL::Compara::PipeConfig::Parts::EPOMapAnchors::pipeline_analyses_epo_anchor_mapping($self) },
@{ Bio::EnsEMBL::Compara::PipeConfig::Parts::EPOAlignment::pipeline_analyses_epo_alignment($self) },
];
}
sub tweak_analyses {
my $self = shift;
my $analyses_by_name = shift;
# Move "make_species_tree" right after "create_mlss_ss" and disconnect it from "dump_mappings_to_file"
$analyses_by_name->{'create_mlss_ss'}->{'-flow_into'} = [ 'make_species_tree' ];
$analyses_by_name->{'make_species_tree'}->{'-flow_into'} = WHEN( '#run_gerp#' => [ 'set_gerp_neutral_rate' ] );
delete $analyses_by_name->{'set_gerp_neutral_rate'}->{'-flow_into'}->{1};
# Do "dump_mappings_to_file" after having trimmed the anchors
$analyses_by_name->{'trim_anchor_align_factory'}->{'-flow_into'} = {
'2->A' => $analyses_by_name->{'trim_anchor_align_factory'}->{'-flow_into'}->{2},
'A->1' => [ 'dump_mappings_to_file' ],
};
}
1;
| Ensembl/ensembl-compara | modules/Bio/EnsEMBL/Compara/PipeConfig/EPO_conf.pm | Perl | apache-2.0 | 7,494 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <axis2_http_simple_response.h>
#include <axis2_http_transport.h>
#include <axutil_string.h>
#include <stdio.h>
#include <string.h>
#include <axutil_types.h>
#include <axiom_mime_part.h>
#define AXIS2_HTTP_SIMPLE_RESPONSE_READ_SIZE 2048
struct axis2_http_simple_response
{
axis2_http_status_line_t *status_line;
axutil_array_list_t *header_group;
axutil_stream_t *stream;
axutil_array_list_t *mime_parts;
axis2_char_t *mtom_sending_callback_name;
};
AXIS2_EXTERN axis2_http_simple_response_t *AXIS2_CALL
axis2_http_simple_response_create(
const axutil_env_t * env,
axis2_http_status_line_t * status_line,
const axis2_http_header_t ** http_headers,
const axis2_ssize_t http_hdr_count,
axutil_stream_t * content)
{
axis2_http_simple_response_t *ret = NULL;
ret = axis2_http_simple_response_create_default(env);
if(!ret)
{
AXIS2_LOG_ERROR(env->log, AXIS2_LOG_SI, "axis2 http simple response creation failed");
return NULL;
}
ret->status_line = status_line;
if(http_hdr_count > 0 && http_headers)
{
int i = 0;
ret->header_group = axutil_array_list_create(env, http_hdr_count);
        /* the cast is safe: the header count is known to fit in an int */
        for(i = 0; i < (int)http_hdr_count; i++)
        {
axutil_array_list_add(ret->header_group, env, (void *)http_headers[i]);
}
}
ret->stream = content;
return ret;
}
AXIS2_EXTERN axis2_http_simple_response_t *AXIS2_CALL
axis2_http_simple_response_create_default(
const axutil_env_t * env)
{
axis2_http_simple_response_t *simple_response = NULL;
simple_response = (axis2_http_simple_response_t *)AXIS2_MALLOC(env->allocator,
sizeof(axis2_http_simple_response_t));
if(!simple_response)
{
AXIS2_HANDLE_ERROR(env, AXIS2_ERROR_NO_MEMORY, AXIS2_FAILURE);
return NULL;
}
memset((void *)simple_response, 0, sizeof(axis2_http_simple_response_t));
return simple_response;
}
void AXIS2_CALL
axis2_http_simple_response_free(
axis2_http_simple_response_t * simple_response,
const axutil_env_t * env)
{
if(simple_response->status_line)
{
axis2_http_status_line_free(simple_response->status_line, env);
}
if(simple_response->header_group)
{
int i = 0;
for(i = 0; i < axutil_array_list_size(simple_response->header_group, env); i++)
{
void *tmp = NULL;
tmp = axutil_array_list_get(simple_response-> header_group, env, i);
if(tmp)
{
axis2_http_header_free((axis2_http_header_t *)tmp, env);
}
}
axutil_array_list_free(simple_response->header_group, env);
}
if(simple_response->mime_parts)
{
int i = 0;
for(i = 0; i < axutil_array_list_size(simple_response->mime_parts, env); i++)
{
void *mime_part = NULL;
mime_part = axutil_array_list_get(simple_response->mime_parts, env, i);
if(mime_part)
{
axiom_mime_part_free((axiom_mime_part_t *)mime_part, env);
}
}
axutil_array_list_free(simple_response->mime_parts, env);
}
/* Stream is not freed. Assumption : stream doesn't belong to the response */
AXIS2_FREE(env->allocator, simple_response);
}
axis2_status_t AXIS2_CALL
axis2_http_simple_response_set_status_line(
struct axis2_http_simple_response * simple_response,
const axutil_env_t * env,
const axis2_char_t * http_ver,
const int status_code,
const axis2_char_t * phrase)
{
if(!http_ver || !phrase || !status_code)
{
AXIS2_LOG_ERROR(env->log, AXIS2_LOG_SI, "invalid parameter given");
return AXIS2_FAILURE;
}
if(simple_response->status_line)
{
axis2_http_status_line_free(simple_response->status_line, env);
}
simple_response->status_line = axis2_http_status_line_create_with_values(
env, http_ver, status_code, phrase);
if(!simple_response->status_line)
{
AXIS2_LOG_ERROR(env->log, AXIS2_LOG_SI,
"http status line creation failed for string %s %3d %s", http_ver, status_code, phrase);
return AXIS2_FAILURE;
}
return AXIS2_SUCCESS;
}
axis2_char_t *AXIS2_CALL
axis2_http_simple_response_get_phrase(
axis2_http_simple_response_t * simple_response,
const axutil_env_t * env)
{
if(!(simple_response->status_line))
{
AXIS2_LOG_ERROR(env->log, AXIS2_LOG_SI,
"axis2 simple response , status line is not available");
return NULL;
}
return axis2_http_status_line_get_reason_phrase(simple_response-> status_line, env);
}
int AXIS2_CALL
axis2_http_simple_response_get_status_code(
axis2_http_simple_response_t * simple_response,
const axutil_env_t * env)
{
if(!(simple_response->status_line))
{
AXIS2_LOG_ERROR(env->log, AXIS2_LOG_SI,
"axis2 simple response , status line is not available");
return -1;
}
return axis2_http_status_line_get_status_code(simple_response->status_line, env);
}
axis2_char_t *AXIS2_CALL
axis2_http_simple_response_get_http_version(
axis2_http_simple_response_t * simple_response,
const axutil_env_t * env)
{
if(!(simple_response->status_line))
{
AXIS2_LOG_ERROR(env->log, AXIS2_LOG_SI,
"axis2 simple response , status line is not available");
return NULL;
}
return axis2_http_status_line_get_http_version(simple_response->status_line, env);
}
axis2_status_t AXIS2_CALL
axis2_http_simple_response_set_http_version(
axis2_http_simple_response_t * simple_response,
const axutil_env_t * env,
axis2_char_t *http_version)
{
if(!(simple_response->status_line))
{
AXIS2_LOG_ERROR(env->log, AXIS2_LOG_SI,
"axis2 simple response , status line is not available");
return AXIS2_FAILURE;
}
axis2_http_status_line_set_http_version(simple_response->status_line, env, http_version);
return AXIS2_SUCCESS;
}
axis2_char_t *AXIS2_CALL
axis2_http_simple_response_get_status_line(
axis2_http_simple_response_t * simple_response,
const axutil_env_t * env)
{
if(!(simple_response->status_line))
{
AXIS2_LOG_ERROR(env->log, AXIS2_LOG_SI,
"axis2 simple response , status line is not available");
return NULL;
}
return axis2_http_status_line_to_string(simple_response->status_line, env);
}
AXIS2_EXTERN axutil_array_list_t *AXIS2_CALL
axis2_http_simple_response_get_headers(
axis2_http_simple_response_t * simple_response,
const axutil_env_t * env)
{
return simple_response->header_group;
}
axutil_array_list_t *AXIS2_CALL
axis2_http_simple_response_extract_headers(
axis2_http_simple_response_t * simple_response,
const axutil_env_t * env)
{
axutil_array_list_t *temp = NULL;
temp = simple_response->header_group;
if(temp)
{
simple_response->header_group = NULL;
}
return temp;
}
axis2_http_header_t *AXIS2_CALL
axis2_http_simple_response_get_first_header(
axis2_http_simple_response_t * simple_response,
const axutil_env_t * env,
const axis2_char_t * str)
{
axis2_http_header_t *tmp_header = NULL;
axis2_char_t *tmp_name = NULL;
int i = 0;
int count = 0;
axutil_array_list_t *header_group = NULL;
AXIS2_PARAM_CHECK(env->error, str, NULL);
header_group = simple_response->header_group;
if(!simple_response->header_group)
{
AXIS2_LOG_ERROR(env->log, AXIS2_LOG_SI, "axis2 simple response , headers not available");
return NULL;
}
if(0 == axutil_array_list_size(header_group, env))
{
AXIS2_LOG_WARNING(env->log, AXIS2_LOG_SI, "axis2 simple response , contains zero headers");
return NULL;
}
count = axutil_array_list_size(header_group, env);
for(i = 0; i < count; i++)
{
tmp_header = (axis2_http_header_t *)axutil_array_list_get(header_group, env, i);
tmp_name = axis2_http_header_get_name(tmp_header, env);
if(0 == axutil_strcasecmp(str, tmp_name))
{
return tmp_header;
}
}
return NULL;
}
axis2_status_t AXIS2_CALL
axis2_http_simple_response_remove_headers(
axis2_http_simple_response_t * simple_response,
const axutil_env_t * env,
const axis2_char_t * str)
{
axutil_array_list_t *header_group = NULL;
int i = 0;
int count = 0;
AXIS2_PARAM_CHECK(env->error, str, AXIS2_FAILURE);
header_group = simple_response->header_group;
if(!header_group)
{
/* Even though we couldn't complete the op, we are sure that the
* required header is no more in the request. So we can proceed without a
* problem.
*/
return AXIS2_SUCCESS;
}
count = axutil_array_list_size(header_group, env);
for(i = 0; i < count; i++)
{
axis2_http_header_t *tmp_header = NULL;
axis2_char_t *tmp_name = NULL;
tmp_header = (axis2_http_header_t *)axutil_array_list_get(header_group, env, i);
tmp_name = axis2_http_header_get_name(tmp_header, env);
if(0 == axutil_strcasecmp(str, tmp_name))
{
axis2_http_header_free(tmp_header, env);
axutil_array_list_remove(header_group, env, i);
break;
}
}
return AXIS2_SUCCESS;
}
axis2_status_t AXIS2_CALL
axis2_http_simple_response_set_header(
axis2_http_simple_response_t * simple_response,
const axutil_env_t * env,
axis2_http_header_t * header)
{
int i = 0;
int count = 0;
axutil_array_list_t *header_group = NULL;
AXIS2_PARAM_CHECK(env->error, header, AXIS2_FAILURE);
if(!simple_response->header_group)
{
simple_response->header_group = axutil_array_list_create(env, 10);
axutil_array_list_add(simple_response->header_group, env, header);
return AXIS2_SUCCESS;
}
    /* If a header with the same name exists, free it and replace it in place */
header_group = simple_response->header_group;
count = axutil_array_list_size(header_group, env);
for(i = 0; i < count; i++)
{
axis2_http_header_t *tmp_header = NULL;
axis2_char_t *tmp_name = NULL;
tmp_header = (axis2_http_header_t *)axutil_array_list_get(header_group, env, i);
tmp_name = axis2_http_header_get_name(tmp_header, env);
if(0 == axutil_strcasecmp(axis2_http_header_get_name(header, env), tmp_name))
{
axis2_http_header_free(tmp_header, env);
axutil_array_list_set(header_group, env, i, header);
return AXIS2_SUCCESS;
}
}
/* if header is not found, then we have to add it */
axutil_array_list_add(header_group, env, header);
return AXIS2_SUCCESS;
}
const axis2_char_t *AXIS2_CALL
axis2_http_simple_response_get_charset(
axis2_http_simple_response_t * simple_response,
const axutil_env_t * env)
{
axis2_http_header_t *tmp_header = NULL;
tmp_header = axis2_http_simple_response_get_first_header(simple_response, env,
AXIS2_HTTP_HEADER_CONTENT_TYPE);
if(tmp_header)
{
axis2_char_t *value = axis2_http_header_get_value(tmp_header, env);
axis2_char_t *charset = (axis2_char_t *)strstr((char *)value,
(char *)AXIS2_HTTP_CHAR_SET_ENCODING);
if(charset)
{
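            /* strchr leaves the pointer on the '=', so the value returned
               below still carries the leading '=' sign */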
charset = strchr((char *)charset, AXIS2_EQ);
return charset;
}
}
return AXIS2_HTTP_DEFAULT_CONTENT_CHARSET;
}
axis2_ssize_t AXIS2_CALL
axis2_http_simple_response_get_content_length(
axis2_http_simple_response_t * simple_response,
const axutil_env_t * env)
{
axis2_http_header_t *tmp_header = NULL;
int error_return = -1;
tmp_header = axis2_http_simple_response_get_first_header(simple_response, env,
AXIS2_HTTP_HEADER_CONTENT_LENGTH);
if(tmp_header)
{
return AXIS2_ATOI(axis2_http_header_get_value(tmp_header, env));
}
return error_return;
}
const axis2_char_t *AXIS2_CALL
axis2_http_simple_response_get_content_type(
axis2_http_simple_response_t * simple_response,
const axutil_env_t * env)
{
axis2_http_header_t *tmp_header = NULL;
tmp_header = axis2_http_simple_response_get_first_header(simple_response, env,
AXIS2_HTTP_HEADER_CONTENT_TYPE);
if(tmp_header)
{
return axis2_http_header_get_value(tmp_header, env);
}
return AXIS2_HTTP_HEADER_ACCEPT_TEXT_PLAIN;
}
axis2_status_t AXIS2_CALL
axis2_http_simple_response_set_body_string(
axis2_http_simple_response_t * simple_response,
const axutil_env_t * env,
axis2_char_t * str)
{
axutil_stream_t *body_stream = NULL;
AXIS2_PARAM_CHECK(env->error, str, AXIS2_FAILURE);
body_stream = simple_response->stream;
if(!body_stream)
{
body_stream = axutil_stream_create_basic(env);
if(!body_stream)
{
AXIS2_LOG_ERROR(env->log, AXIS2_LOG_SI, "unable to create basic stream for string %s",
str);
return AXIS2_FAILURE;
}
simple_response->stream = body_stream;
}
axutil_stream_write(body_stream, env, str, axutil_strlen(str));
return AXIS2_SUCCESS;
}
axis2_status_t AXIS2_CALL
axis2_http_simple_response_set_body_stream(
axis2_http_simple_response_t * simple_response,
const axutil_env_t * env,
axutil_stream_t * stream)
{
/*
* We don't free the stream
* Problem in freeing is most of the time the stream doesn't belong
* to the http_simple_response
*/
simple_response->stream = stream;
return AXIS2_SUCCESS;
}
axutil_stream_t *AXIS2_CALL
axis2_http_simple_response_get_body(
axis2_http_simple_response_t * simple_response,
const axutil_env_t * env)
{
return simple_response->stream;
}
axis2_ssize_t AXIS2_CALL
axis2_http_simple_response_get_body_bytes(
axis2_http_simple_response_t * simple_response,
const axutil_env_t * env,
axis2_char_t ** buffer)
{
axutil_stream_t *tmp_stream = NULL;
axis2_bool_t loop_state = AXIS2_TRUE;
int return_size = -1;
if(!simple_response->stream)
{
AXIS2_HANDLE_ERROR(env, AXIS2_ERROR_NULL_BODY, AXIS2_FAILURE);
return return_size;
}
tmp_stream = axutil_stream_create_basic(env);
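    /* Drain the response stream in fixed-size chunks. A short read (fewer than
       READ_SIZE - 1 bytes) is treated as end of stream, which assumes that
       axutil_stream_read only returns a short count when the stream is done. */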
while(loop_state)
{
int read = 0;
/*int write = 0;*/
char buf[AXIS2_HTTP_SIMPLE_RESPONSE_READ_SIZE];
read = axutil_stream_read(simple_response->stream, env, buf,
AXIS2_HTTP_SIMPLE_RESPONSE_READ_SIZE);
if(read < 0)
{
break;
}
/*write = */axutil_stream_write(tmp_stream, env, buf, read);
if(read < (AXIS2_HTTP_SIMPLE_RESPONSE_READ_SIZE - 1))
{
break;
}
}
return_size = axutil_stream_get_len(tmp_stream, env);
if(return_size > 0)
{
*buffer = (char *)AXIS2_MALLOC(env->allocator, sizeof(char) * (return_size + 1));
        if(!*buffer)
{
AXIS2_HANDLE_ERROR(env, AXIS2_ERROR_NO_MEMORY, AXIS2_FAILURE);
return -1;
}
return_size = axutil_stream_read(tmp_stream, env, *buffer, return_size + 1);
}
axutil_stream_free(tmp_stream, env);
return return_size;
}
axis2_bool_t AXIS2_CALL
axis2_http_simple_response_contains_header(
axis2_http_simple_response_t * simple_response,
const axutil_env_t * env,
const axis2_char_t * name)
{
axis2_char_t *header_name = NULL;
int count = 0;
int i = 0;
AXIS2_PARAM_CHECK(env->error, name, AXIS2_FAILURE);
if(!simple_response->header_group)
{
AXIS2_LOG_ERROR(env->log, AXIS2_LOG_SI, "axis2 simple response , headers not available");
return AXIS2_FALSE;
}
count = axutil_array_list_size(simple_response->header_group, env);
if(0 == count)
{
AXIS2_LOG_WARNING(env->log, AXIS2_LOG_SI, "axis2 simple response , contains zero headers");
return AXIS2_FALSE;
}
for(i = 0; i < count; i++)
{
axis2_http_header_t *header = (axis2_http_header_t *)axutil_array_list_get(
simple_response->header_group, env, i);
header_name = axis2_http_header_get_name(header, env);
if(0 == axutil_strcasecmp(name, header_name))
{
return AXIS2_TRUE;
}
}
return AXIS2_FALSE;
}
AXIS2_EXTERN axutil_array_list_t *AXIS2_CALL
axis2_http_simple_response_get_mime_parts(
axis2_http_simple_response_t * simple_response,
const axutil_env_t * env)
{
return simple_response->mime_parts;
}
void AXIS2_EXTERN AXIS2_CALL
axis2_http_simple_response_set_mime_parts(
axis2_http_simple_response_t * simple_response,
const axutil_env_t * env,
axutil_array_list_t *mime_parts)
{
simple_response->mime_parts = mime_parts;
}
AXIS2_EXTERN axis2_char_t *AXIS2_CALL
axis2_http_simple_response_get_mtom_sending_callback_name(
axis2_http_simple_response_t * simple_response,
const axutil_env_t * env)
{
return simple_response->mtom_sending_callback_name;
}
void AXIS2_EXTERN AXIS2_CALL
axis2_http_simple_response_set_mtom_sending_callback_name(
axis2_http_simple_response_t * simple_response,
const axutil_env_t * env,
axis2_char_t *mtom_sending_callback_name)
{
simple_response->mtom_sending_callback_name =
mtom_sending_callback_name;
}
| axbannaz/axis2-c | src/core/transport/http/common/http_simple_response.c | C | apache-2.0 | 18,252 |
# Copyright (c) 2012 NetApp, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for the NetApp-specific NFS driver module."""
from lxml import etree
import mock
import mox
from mox import IgnoreArg
from mox import IsA
import os
from cinder import context
from cinder import exception
from cinder.image import image_utils
from cinder.openstack.common.gettextutils import _
from cinder.openstack.common import log as logging
from cinder import test
from cinder.volume import configuration as conf
from cinder.volume.drivers.netapp import api
from cinder.volume.drivers.netapp import nfs as netapp_nfs
from cinder.volume.drivers.netapp import utils
from oslo.config import cfg
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
def create_configuration():
configuration = mox.MockObject(conf.Configuration)
configuration.append_config_values(mox.IgnoreArg())
configuration.nfs_mount_point_base = '/mnt/test'
configuration.nfs_mount_options = None
return configuration
class FakeVolume(object):
def __init__(self, size=0):
self.size = size
self.id = hash(self)
self.name = None
def __getitem__(self, key):
return self.__dict__[key]
def __setitem__(self, key, val):
self.__dict__[key] = val
class FakeSnapshot(object):
def __init__(self, volume_size=0):
self.volume_name = None
self.name = None
self.volume_id = None
self.volume_size = volume_size
self.user_id = None
self.status = None
def __getitem__(self, key):
return self.__dict__[key]
class FakeResponse(object):
def __init__(self, status):
"""Initialize FakeResponse.
:param status: Either 'failed' or 'passed'
"""
self.Status = status
if status == 'failed':
self.Reason = 'Sample error'
class NetappDirectCmodeNfsDriverTestCase(test.TestCase):
"""Test direct NetApp C Mode driver."""
def setUp(self):
super(NetappDirectCmodeNfsDriverTestCase, self).setUp()
self._custom_setup()
def test_create_snapshot(self):
"""Test snapshot can be created and deleted."""
mox = self.mox
drv = self._driver
mox.StubOutWithMock(drv, '_clone_volume')
drv._clone_volume(IgnoreArg(), IgnoreArg(), IgnoreArg())
mox.ReplayAll()
drv.create_snapshot(FakeSnapshot())
mox.VerifyAll()
def test_create_volume_from_snapshot(self):
"""Tests volume creation from snapshot."""
drv = self._driver
mox = self.mox
volume = FakeVolume(1)
snapshot = FakeSnapshot(1)
location = '127.0.0.1:/nfs'
expected_result = {'provider_location': location}
mox.StubOutWithMock(drv, '_clone_volume')
mox.StubOutWithMock(drv, '_get_volume_location')
mox.StubOutWithMock(drv, 'local_path')
mox.StubOutWithMock(drv, '_discover_file_till_timeout')
mox.StubOutWithMock(drv, '_set_rw_permissions_for_all')
drv._clone_volume(IgnoreArg(), IgnoreArg(), IgnoreArg())
drv._get_volume_location(IgnoreArg()).AndReturn(location)
drv.local_path(IgnoreArg()).AndReturn('/mnt')
drv._discover_file_till_timeout(IgnoreArg()).AndReturn(True)
drv._set_rw_permissions_for_all(IgnoreArg())
mox.ReplayAll()
loc = drv.create_volume_from_snapshot(volume, snapshot)
self.assertEqual(loc, expected_result)
mox.VerifyAll()
def _prepare_delete_snapshot_mock(self, snapshot_exists):
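        """Set mox expectations for delete_snapshot.

        When snapshot_exists is True, additionally expect the volume path
        lookup and the 'rm' call that removes the snapshot file.
        """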
drv = self._driver
mox = self.mox
mox.StubOutWithMock(drv, '_get_provider_location')
mox.StubOutWithMock(drv, '_volume_not_present')
mox.StubOutWithMock(drv, '_post_prov_deprov_in_ssc')
if snapshot_exists:
mox.StubOutWithMock(drv, '_execute')
mox.StubOutWithMock(drv, '_get_volume_path')
drv._get_provider_location(IgnoreArg())
drv._get_provider_location(IgnoreArg())
drv._volume_not_present(IgnoreArg(), IgnoreArg())\
.AndReturn(not snapshot_exists)
if snapshot_exists:
drv._get_volume_path(IgnoreArg(), IgnoreArg())
drv._execute('rm', None, run_as_root=True)
drv._post_prov_deprov_in_ssc(IgnoreArg())
mox.ReplayAll()
return mox
def test_delete_existing_snapshot(self):
drv = self._driver
mox = self._prepare_delete_snapshot_mock(True)
drv.delete_snapshot(FakeSnapshot())
mox.VerifyAll()
def test_delete_missing_snapshot(self):
drv = self._driver
mox = self._prepare_delete_snapshot_mock(False)
drv.delete_snapshot(FakeSnapshot())
mox.VerifyAll()
def _custom_setup(self):
kwargs = {}
kwargs['netapp_mode'] = 'proxy'
kwargs['configuration'] = create_configuration()
self._driver = netapp_nfs.NetAppDirectCmodeNfsDriver(**kwargs)
def test_check_for_setup_error(self):
mox = self.mox
drv = self._driver
required_flags = [
'netapp_transport_type',
'netapp_login',
'netapp_password',
'netapp_server_hostname',
'netapp_server_port']
        # unset required flags
for flag in required_flags:
setattr(drv.configuration, flag, None)
        # check that an exception is raised when flags are not set
self.assertRaises(exception.CinderException,
drv.check_for_setup_error)
# set required flags
for flag in required_flags:
setattr(drv.configuration, flag, 'val')
setattr(drv, 'ssc_enabled', False)
mox.StubOutWithMock(netapp_nfs.NetAppDirectNfsDriver, '_check_flags')
netapp_nfs.NetAppDirectNfsDriver._check_flags()
mox.ReplayAll()
drv.check_for_setup_error()
mox.VerifyAll()
# restore initial FLAGS
for flag in required_flags:
delattr(drv.configuration, flag)
def test_do_setup(self):
mox = self.mox
drv = self._driver
mox.StubOutWithMock(netapp_nfs.NetAppNFSDriver, 'do_setup')
mox.StubOutWithMock(drv, '_get_client')
mox.StubOutWithMock(drv, '_do_custom_setup')
netapp_nfs.NetAppNFSDriver.do_setup(IgnoreArg())
drv._get_client()
drv._do_custom_setup(IgnoreArg())
mox.ReplayAll()
drv.do_setup(IsA(context.RequestContext))
mox.VerifyAll()
def _prepare_clone_mock(self, status):
drv = self._driver
mox = self.mox
volume = FakeVolume()
setattr(volume, 'provider_location', '127.0.0.1:/nfs')
mox.StubOutWithMock(drv, '_get_host_ip')
mox.StubOutWithMock(drv, '_get_export_path')
mox.StubOutWithMock(drv, '_get_if_info_by_ip')
mox.StubOutWithMock(drv, '_get_vol_by_junc_vserver')
mox.StubOutWithMock(drv, '_clone_file')
mox.StubOutWithMock(drv, '_post_prov_deprov_in_ssc')
drv._get_host_ip(IgnoreArg()).AndReturn('127.0.0.1')
drv._get_export_path(IgnoreArg()).AndReturn('/nfs')
drv._get_if_info_by_ip('127.0.0.1').AndReturn(
self._prepare_info_by_ip_response())
drv._get_vol_by_junc_vserver('openstack', '/nfs').AndReturn('nfsvol')
drv._clone_file('nfsvol', 'volume_name', 'clone_name',
'openstack')
drv._post_prov_deprov_in_ssc(IgnoreArg())
return mox
def _prepare_info_by_ip_response(self):
res = """<attributes-list>
<net-interface-info>
<address>127.0.0.1</address>
<administrative-status>up</administrative-status>
<current-node>fas3170rre-cmode-01</current-node>
<current-port>e1b-1165</current-port>
<data-protocols>
<data-protocol>nfs</data-protocol>
</data-protocols>
<dns-domain-name>none</dns-domain-name>
<failover-group/>
<failover-policy>disabled</failover-policy>
<firewall-policy>data</firewall-policy>
<home-node>fas3170rre-cmode-01</home-node>
<home-port>e1b-1165</home-port>
<interface-name>nfs_data1</interface-name>
<is-auto-revert>false</is-auto-revert>
<is-home>true</is-home>
<netmask>255.255.255.0</netmask>
<netmask-length>24</netmask-length>
<operational-status>up</operational-status>
<role>data</role>
<routing-group-name>c10.63.165.0/24</routing-group-name>
<use-failover-group>disabled</use-failover-group>
<vserver>openstack</vserver>
</net-interface-info></attributes-list>"""
response_el = etree.XML(res)
return api.NaElement(response_el).get_children()
def test_clone_volume(self):
drv = self._driver
mox = self._prepare_clone_mock('pass')
mox.ReplayAll()
volume_name = 'volume_name'
clone_name = 'clone_name'
volume_id = volume_name + str(hash(volume_name))
share = 'ip:/share'
drv._clone_volume(volume_name, clone_name, volume_id, share)
mox.VerifyAll()
def test_register_img_in_cache_noshare(self):
volume = {'id': '1', 'name': 'testvol'}
volume['provider_location'] = '10.61.170.1:/share/path'
drv = self._driver
mox = self.mox
mox.StubOutWithMock(drv, '_do_clone_rel_img_cache')
drv._do_clone_rel_img_cache('testvol', 'img-cache-12345',
'10.61.170.1:/share/path',
'img-cache-12345')
mox.ReplayAll()
drv._register_image_in_cache(volume, '12345')
mox.VerifyAll()
def test_register_img_in_cache_with_share(self):
volume = {'id': '1', 'name': 'testvol'}
volume['provider_location'] = '10.61.170.1:/share/path'
drv = self._driver
mox = self.mox
mox.StubOutWithMock(drv, '_do_clone_rel_img_cache')
drv._do_clone_rel_img_cache('testvol', 'img-cache-12345',
'10.61.170.1:/share/path',
'img-cache-12345')
mox.ReplayAll()
drv._register_image_in_cache(volume, '12345')
mox.VerifyAll()
def test_find_image_in_cache_no_shares(self):
drv = self._driver
drv._mounted_shares = []
result = drv._find_image_in_cache('image_id')
        self.assertFalse(result, 'Return result is unexpected')
def test_find_image_in_cache_shares(self):
drv = self._driver
mox = self.mox
drv._mounted_shares = ['testshare']
mox.StubOutWithMock(drv, '_get_mount_point_for_share')
mox.StubOutWithMock(os.path, 'exists')
drv._get_mount_point_for_share('testshare').AndReturn('/mnt')
os.path.exists('/mnt/img-cache-id').AndReturn(True)
mox.ReplayAll()
result = drv._find_image_in_cache('id')
(share, file_name) = result[0]
mox.VerifyAll()
drv._mounted_shares.remove('testshare')
        if share != 'testshare' or file_name != 'img-cache-id':
            LOG.warn(_("Share %(share)s and file name %(file_name)s")
                     % {'share': share, 'file_name': file_name})
            self.fail('Return result is unexpected')
def test_find_old_cache_files_notexists(self):
drv = self._driver
mox = self.mox
cmd = ['find', '/mnt', '-maxdepth', '1', '-name',
'img-cache*', '-amin', '+720']
setattr(drv.configuration, 'expiry_thres_minutes', 720)
mox.StubOutWithMock(drv, '_get_mount_point_for_share')
mox.StubOutWithMock(drv, '_execute')
drv._get_mount_point_for_share(IgnoreArg()).AndReturn('/mnt')
drv._execute(*cmd, run_as_root=True).AndReturn((None, ''))
mox.ReplayAll()
res = drv._find_old_cache_files('share')
mox.VerifyAll()
        self.assertEqual(0, len(res), 'No files expected but got return values.')
def test_find_old_cache_files_exists(self):
drv = self._driver
mox = self.mox
cmd = ['find', '/mnt', '-maxdepth', '1', '-name',
'img-cache*', '-amin', '+720']
setattr(drv.configuration, 'expiry_thres_minutes', '720')
files = '/mnt/img-id1\n/mnt/img-id2\n'
r_files = ['img-id1', 'img-id2']
mox.StubOutWithMock(drv, '_get_mount_point_for_share')
mox.StubOutWithMock(drv, '_execute')
mox.StubOutWithMock(drv, '_shortlist_del_eligible_files')
drv._get_mount_point_for_share('share').AndReturn('/mnt')
drv._execute(*cmd, run_as_root=True).AndReturn((files, None))
drv._shortlist_del_eligible_files(
IgnoreArg(), r_files).AndReturn(r_files)
mox.ReplayAll()
res = drv._find_old_cache_files('share')
mox.VerifyAll()
        self.assertEqual(sorted(r_files), sorted(res),
                         'Returned files not same as expected.')
def test_delete_files_till_bytes_free_success(self):
drv = self._driver
mox = self.mox
files = [('img-cache-1', 230), ('img-cache-2', 380)]
mox.StubOutWithMock(drv, '_get_mount_point_for_share')
mox.StubOutWithMock(drv, '_delete_file')
drv._get_mount_point_for_share(IgnoreArg()).AndReturn('/mnt')
drv._delete_file('/mnt/img-cache-2').AndReturn(True)
drv._delete_file('/mnt/img-cache-1').AndReturn(True)
mox.ReplayAll()
drv._delete_files_till_bytes_free(files, 'share', bytes_to_free=1024)
mox.VerifyAll()
def test_clean_image_cache_exec(self):
drv = self._driver
mox = self.mox
drv.configuration.thres_avl_size_perc_start = 20
drv.configuration.thres_avl_size_perc_stop = 50
drv._mounted_shares = ['testshare']
mox.StubOutWithMock(drv, '_find_old_cache_files')
mox.StubOutWithMock(drv, '_delete_files_till_bytes_free')
mox.StubOutWithMock(drv, '_get_capacity_info')
drv._get_capacity_info('testshare').AndReturn((100, 19, 81))
drv._find_old_cache_files('testshare').AndReturn(['f1', 'f2'])
drv._delete_files_till_bytes_free(
['f1', 'f2'], 'testshare', bytes_to_free=31)
mox.ReplayAll()
drv._clean_image_cache()
mox.VerifyAll()
drv._mounted_shares.remove('testshare')
        self.assertFalse(drv.cleaning, 'Clean image cache failed.')
def test_clean_image_cache_noexec(self):
drv = self._driver
mox = self.mox
drv.configuration.thres_avl_size_perc_start = 20
drv.configuration.thres_avl_size_perc_stop = 50
drv._mounted_shares = ['testshare']
mox.StubOutWithMock(drv, '_get_capacity_info')
drv._get_capacity_info('testshare').AndReturn((100, 30, 70))
mox.ReplayAll()
drv._clean_image_cache()
mox.VerifyAll()
drv._mounted_shares.remove('testshare')
        self.assertFalse(drv.cleaning, 'Clean image cache failed.')
def test_clone_image_fromcache(self):
drv = self._driver
mox = self.mox
volume = {'name': 'vol', 'size': '20'}
mox.StubOutWithMock(drv, '_find_image_in_cache')
mox.StubOutWithMock(drv, '_do_clone_rel_img_cache')
mox.StubOutWithMock(drv, '_post_clone_image')
mox.StubOutWithMock(drv, '_is_share_vol_compatible')
drv._find_image_in_cache(IgnoreArg()).AndReturn(
[('share', 'file_name')])
drv._is_share_vol_compatible(IgnoreArg(), IgnoreArg()).AndReturn(True)
drv._do_clone_rel_img_cache('file_name', 'vol', 'share', 'file_name')
drv._post_clone_image(volume)
mox.ReplayAll()
drv.clone_image(volume, ('image_location', None), 'image_id', {})
mox.VerifyAll()
def get_img_info(self, format):
class img_info(object):
def __init__(self, fmt):
self.file_format = fmt
return img_info(format)
def test_clone_image_cloneableshare_nospace(self):
drv = self._driver
mox = self.mox
volume = {'name': 'vol', 'size': '20'}
mox.StubOutWithMock(drv, '_find_image_in_cache')
mox.StubOutWithMock(drv, '_is_cloneable_share')
mox.StubOutWithMock(drv, '_is_share_vol_compatible')
drv._find_image_in_cache(IgnoreArg()).AndReturn([])
drv._is_cloneable_share(IgnoreArg()).AndReturn('127.0.0.1:/share')
drv._is_share_vol_compatible(IgnoreArg(), IgnoreArg()).AndReturn(False)
mox.ReplayAll()
        (prop, cloned) = drv.clone_image(
volume, ('nfs://127.0.0.1:/share/img-id', None), 'image_id', {})
mox.VerifyAll()
        self.assertFalse(cloned, 'Expected not cloned, got cloned.')
        self.assertFalse(prop['provider_location'])
def test_clone_image_cloneableshare_raw(self):
drv = self._driver
mox = self.mox
volume = {'name': 'vol', 'size': '20'}
mox.StubOutWithMock(drv, '_find_image_in_cache')
mox.StubOutWithMock(drv, '_is_cloneable_share')
mox.StubOutWithMock(drv, '_get_mount_point_for_share')
mox.StubOutWithMock(image_utils, 'qemu_img_info')
mox.StubOutWithMock(drv, '_clone_volume')
mox.StubOutWithMock(drv, '_discover_file_till_timeout')
mox.StubOutWithMock(drv, '_set_rw_permissions_for_all')
mox.StubOutWithMock(drv, '_resize_image_file')
mox.StubOutWithMock(drv, '_is_share_vol_compatible')
drv._find_image_in_cache(IgnoreArg()).AndReturn([])
drv._is_cloneable_share(IgnoreArg()).AndReturn('127.0.0.1:/share')
drv._is_share_vol_compatible(IgnoreArg(), IgnoreArg()).AndReturn(True)
drv._get_mount_point_for_share(IgnoreArg()).AndReturn('/mnt')
image_utils.qemu_img_info('/mnt/img-id').AndReturn(
self.get_img_info('raw'))
drv._clone_volume(
'img-id', 'vol', share='127.0.0.1:/share', volume_id=None)
drv._get_mount_point_for_share(IgnoreArg()).AndReturn('/mnt')
drv._discover_file_till_timeout(IgnoreArg()).AndReturn(True)
drv._set_rw_permissions_for_all('/mnt/vol')
drv._resize_image_file({'name': 'vol'}, IgnoreArg())
mox.ReplayAll()
        drv.clone_image(
volume, ('nfs://127.0.0.1:/share/img-id', None), 'image_id', {})
mox.VerifyAll()
def test_clone_image_cloneableshare_notraw(self):
drv = self._driver
mox = self.mox
volume = {'name': 'vol', 'size': '20'}
mox.StubOutWithMock(drv, '_find_image_in_cache')
mox.StubOutWithMock(drv, '_is_cloneable_share')
mox.StubOutWithMock(drv, '_get_mount_point_for_share')
mox.StubOutWithMock(image_utils, 'qemu_img_info')
mox.StubOutWithMock(drv, '_clone_volume')
mox.StubOutWithMock(drv, '_discover_file_till_timeout')
mox.StubOutWithMock(drv, '_set_rw_permissions_for_all')
mox.StubOutWithMock(drv, '_resize_image_file')
mox.StubOutWithMock(image_utils, 'convert_image')
mox.StubOutWithMock(drv, '_register_image_in_cache')
mox.StubOutWithMock(drv, '_is_share_vol_compatible')
drv._find_image_in_cache(IgnoreArg()).AndReturn([])
drv._is_cloneable_share('nfs://127.0.0.1/share/img-id').AndReturn(
'127.0.0.1:/share')
drv._is_share_vol_compatible(IgnoreArg(), IgnoreArg()).AndReturn(True)
drv._get_mount_point_for_share('127.0.0.1:/share').AndReturn('/mnt')
image_utils.qemu_img_info('/mnt/img-id').AndReturn(
self.get_img_info('notraw'))
image_utils.convert_image(IgnoreArg(), IgnoreArg(), 'raw')
image_utils.qemu_img_info('/mnt/vol').AndReturn(
self.get_img_info('raw'))
drv._register_image_in_cache(IgnoreArg(), IgnoreArg())
drv._get_mount_point_for_share('127.0.0.1:/share').AndReturn('/mnt')
drv._discover_file_till_timeout(IgnoreArg()).AndReturn(True)
drv._set_rw_permissions_for_all('/mnt/vol')
drv._resize_image_file({'name': 'vol'}, IgnoreArg())
mox.ReplayAll()
        drv.clone_image(
volume, ('nfs://127.0.0.1/share/img-id', None), 'image_id', {})
mox.VerifyAll()
def test_clone_image_file_not_discovered(self):
drv = self._driver
mox = self.mox
volume = {'name': 'vol', 'size': '20'}
mox.StubOutWithMock(drv, '_find_image_in_cache')
mox.StubOutWithMock(drv, '_is_cloneable_share')
mox.StubOutWithMock(drv, '_get_mount_point_for_share')
mox.StubOutWithMock(image_utils, 'qemu_img_info')
mox.StubOutWithMock(drv, '_clone_volume')
mox.StubOutWithMock(drv, '_discover_file_till_timeout')
mox.StubOutWithMock(image_utils, 'convert_image')
mox.StubOutWithMock(drv, '_register_image_in_cache')
mox.StubOutWithMock(drv, '_is_share_vol_compatible')
mox.StubOutWithMock(drv, 'local_path')
mox.StubOutWithMock(os.path, 'exists')
mox.StubOutWithMock(drv, '_delete_file')
drv._find_image_in_cache(IgnoreArg()).AndReturn([])
drv._is_cloneable_share('nfs://127.0.0.1/share/img-id').AndReturn(
'127.0.0.1:/share')
drv._is_share_vol_compatible(IgnoreArg(), IgnoreArg()).AndReturn(True)
drv._get_mount_point_for_share('127.0.0.1:/share').AndReturn('/mnt')
image_utils.qemu_img_info('/mnt/img-id').AndReturn(
self.get_img_info('notraw'))
image_utils.convert_image(IgnoreArg(), IgnoreArg(), 'raw')
image_utils.qemu_img_info('/mnt/vol').AndReturn(
self.get_img_info('raw'))
drv._register_image_in_cache(IgnoreArg(), IgnoreArg())
drv.local_path(IgnoreArg()).AndReturn('/mnt/vol')
drv._discover_file_till_timeout(IgnoreArg()).AndReturn(False)
drv.local_path(IgnoreArg()).AndReturn('/mnt/vol')
os.path.exists('/mnt/vol').AndReturn(True)
drv._delete_file('/mnt/vol')
mox.ReplayAll()
        vol_dict, result = drv.clone_image(
volume, ('nfs://127.0.0.1/share/img-id', None), 'image_id', {})
mox.VerifyAll()
self.assertFalse(result)
self.assertFalse(vol_dict['bootable'])
self.assertIsNone(vol_dict['provider_location'])
def test_clone_image_resizefails(self):
drv = self._driver
mox = self.mox
volume = {'name': 'vol', 'size': '20'}
mox.StubOutWithMock(drv, '_find_image_in_cache')
mox.StubOutWithMock(drv, '_is_cloneable_share')
mox.StubOutWithMock(drv, '_get_mount_point_for_share')
mox.StubOutWithMock(image_utils, 'qemu_img_info')
mox.StubOutWithMock(drv, '_clone_volume')
mox.StubOutWithMock(drv, '_discover_file_till_timeout')
mox.StubOutWithMock(drv, '_set_rw_permissions_for_all')
mox.StubOutWithMock(drv, '_resize_image_file')
mox.StubOutWithMock(image_utils, 'convert_image')
mox.StubOutWithMock(drv, '_register_image_in_cache')
mox.StubOutWithMock(drv, '_is_share_vol_compatible')
mox.StubOutWithMock(drv, 'local_path')
mox.StubOutWithMock(os.path, 'exists')
mox.StubOutWithMock(drv, '_delete_file')
drv._find_image_in_cache(IgnoreArg()).AndReturn([])
drv._is_cloneable_share('nfs://127.0.0.1/share/img-id').AndReturn(
'127.0.0.1:/share')
drv._is_share_vol_compatible(IgnoreArg(), IgnoreArg()).AndReturn(True)
drv._get_mount_point_for_share('127.0.0.1:/share').AndReturn('/mnt')
image_utils.qemu_img_info('/mnt/img-id').AndReturn(
self.get_img_info('notraw'))
image_utils.convert_image(IgnoreArg(), IgnoreArg(), 'raw')
image_utils.qemu_img_info('/mnt/vol').AndReturn(
self.get_img_info('raw'))
drv._register_image_in_cache(IgnoreArg(), IgnoreArg())
drv.local_path(IgnoreArg()).AndReturn('/mnt/vol')
drv._discover_file_till_timeout(IgnoreArg()).AndReturn(True)
drv._set_rw_permissions_for_all('/mnt/vol')
drv._resize_image_file(
IgnoreArg(), IgnoreArg()).AndRaise(exception.InvalidResults())
drv.local_path(IgnoreArg()).AndReturn('/mnt/vol')
os.path.exists('/mnt/vol').AndReturn(True)
drv._delete_file('/mnt/vol')
mox.ReplayAll()
        vol_dict, result = drv.clone_image(
volume, ('nfs://127.0.0.1/share/img-id', None), 'image_id', {})
mox.VerifyAll()
self.assertFalse(result)
self.assertFalse(vol_dict['bootable'])
self.assertIsNone(vol_dict['provider_location'])
def test_is_cloneable_share_badformats(self):
drv = self._driver
strgs = ['10.61.666.22:/share/img',
'nfs://10.61.666.22:/share/img',
'nfs://10.61.666.22//share/img',
'nfs://com.netapp.com:/share/img',
'nfs://com.netapp.com//share/img',
'com.netapp.com://share/im\g',
'http://com.netapp.com://share/img',
'nfs://com.netapp.com:/share/img',
                 'nfs://com.netapp.com:8080//share/img',
'nfs://com.netapp.com//img',
'nfs://[ae::sr::ty::po]/img']
for strg in strgs:
res = drv._is_cloneable_share(strg)
if res:
msg = 'Invalid format matched for url %s.' % strg
self.fail(msg)
def test_is_cloneable_share_goodformat1(self):
drv = self._driver
mox = self.mox
strg = 'nfs://10.61.222.333/share/img'
mox.StubOutWithMock(drv, '_check_share_in_use')
drv._check_share_in_use(IgnoreArg(), IgnoreArg()).AndReturn('share')
mox.ReplayAll()
drv._is_cloneable_share(strg)
mox.VerifyAll()
def test_is_cloneable_share_goodformat2(self):
drv = self._driver
mox = self.mox
strg = 'nfs://10.61.222.333:8080/share/img'
mox.StubOutWithMock(drv, '_check_share_in_use')
drv._check_share_in_use(IgnoreArg(), IgnoreArg()).AndReturn('share')
mox.ReplayAll()
drv._is_cloneable_share(strg)
mox.VerifyAll()
def test_is_cloneable_share_goodformat3(self):
drv = self._driver
mox = self.mox
strg = 'nfs://com.netapp:8080/share/img'
mox.StubOutWithMock(drv, '_check_share_in_use')
drv._check_share_in_use(IgnoreArg(), IgnoreArg()).AndReturn('share')
mox.ReplayAll()
drv._is_cloneable_share(strg)
mox.VerifyAll()
def test_is_cloneable_share_goodformat4(self):
drv = self._driver
mox = self.mox
strg = 'nfs://netapp.com/share/img'
mox.StubOutWithMock(drv, '_check_share_in_use')
drv._check_share_in_use(IgnoreArg(), IgnoreArg()).AndReturn('share')
mox.ReplayAll()
drv._is_cloneable_share(strg)
mox.VerifyAll()
def test_is_cloneable_share_goodformat5(self):
drv = self._driver
mox = self.mox
strg = 'nfs://netapp.com/img'
mox.StubOutWithMock(drv, '_check_share_in_use')
drv._check_share_in_use(IgnoreArg(), IgnoreArg()).AndReturn('share')
mox.ReplayAll()
drv._is_cloneable_share(strg)
mox.VerifyAll()
def test_check_share_in_use_no_conn(self):
drv = self._driver
share = drv._check_share_in_use(None, '/dir')
if share:
self.fail('Unexpected share detected.')
def test_check_share_in_use_invalid_conn(self):
drv = self._driver
share = drv._check_share_in_use(':8989', '/dir')
if share:
self.fail('Unexpected share detected.')
def test_check_share_in_use_incorrect_host(self):
drv = self._driver
mox = self.mox
mox.StubOutWithMock(utils, 'resolve_hostname')
utils.resolve_hostname(IgnoreArg()).AndRaise(Exception())
mox.ReplayAll()
share = drv._check_share_in_use('incorrect:8989', '/dir')
mox.VerifyAll()
if share:
self.fail('Unexpected share detected.')
def test_check_share_in_use_success(self):
drv = self._driver
mox = self.mox
drv._mounted_shares = ['127.0.0.1:/dir/share']
mox.StubOutWithMock(utils, 'resolve_hostname')
mox.StubOutWithMock(drv, '_share_match_for_ip')
utils.resolve_hostname(IgnoreArg()).AndReturn('10.22.33.44')
drv._share_match_for_ip(
'10.22.33.44', ['127.0.0.1:/dir/share']).AndReturn('share')
mox.ReplayAll()
share = drv._check_share_in_use('127.0.0.1:8989', '/dir/share')
mox.VerifyAll()
if not share:
self.fail('Expected share not detected')
def test_construct_image_url_loc(self):
drv = self._driver
img_loc = (None,
[{'metadata':
{'share_location': 'nfs://host/path',
'mount_point': '/opt/stack/data/glance',
'type': 'nfs'},
'url': 'file:///opt/stack/data/glance/image-id'}])
location = drv._construct_image_nfs_url(img_loc)
if location != "nfs://host/path/image-id":
self.fail("Unexpected direct url.")
def test_construct_image_url_direct(self):
drv = self._driver
img_loc = ("nfs://host/path/image-id", None)
location = drv._construct_image_nfs_url(img_loc)
if location != "nfs://host/path/image-id":
self.fail("Unexpected direct url.")
class NetappDirectCmodeNfsDriverOnlyTestCase(test.TestCase):
"""Test direct NetApp C Mode driver only and not inherit."""
def setUp(self):
super(NetappDirectCmodeNfsDriverOnlyTestCase, self).setUp()
self._custom_setup()
def _custom_setup(self):
kwargs = {}
kwargs['netapp_mode'] = 'proxy'
kwargs['configuration'] = create_configuration()
self._driver = netapp_nfs.NetAppDirectCmodeNfsDriver(**kwargs)
self._driver.ssc_enabled = True
self._driver.configuration.netapp_copyoffload_tool_path = 'cof_path'
@mock.patch.object(netapp_nfs, 'get_volume_extra_specs')
def test_create_volume(self, mock_volume_extra_specs):
drv = self._driver
drv.ssc_enabled = False
extra_specs = {}
mock_volume_extra_specs.return_value = extra_specs
fake_share = 'localhost:myshare'
with mock.patch.object(drv, '_ensure_shares_mounted'):
with mock.patch.object(drv, '_find_shares',
return_value=['localhost:myshare']):
with mock.patch.object(drv, '_do_create_volume'):
volume_info = self._driver.create_volume(FakeVolume(1))
self.assertEqual(volume_info.get('provider_location'),
fake_share)
@mock.patch.object(netapp_nfs, 'get_volume_extra_specs')
def test_create_volume_with_qos_policy(self, mock_volume_extra_specs):
drv = self._driver
drv.ssc_enabled = False
extra_specs = {'netapp:qos_policy_group': 'qos_policy_1'}
fake_volume = FakeVolume(1)
fake_share = 'localhost:myshare'
fake_qos_policy = 'qos_policy_1'
mock_volume_extra_specs.return_value = extra_specs
with mock.patch.object(drv, '_ensure_shares_mounted'):
with mock.patch.object(drv, '_find_shares',
return_value=['localhost:myshare']):
with mock.patch.object(drv, '_do_create_volume'):
with mock.patch.object(drv,
'_set_qos_policy_group_on_volume'
) as mock_set_qos:
volume_info = self._driver.create_volume(fake_volume)
self.assertEqual(volume_info.get('provider_location'),
'localhost:myshare')
mock_set_qos.assert_called_once_with(fake_volume,
fake_share,
fake_qos_policy)
def test_copy_img_to_vol_copyoffload_success(self):
drv = self._driver
context = object()
volume = {'id': 'vol_id', 'name': 'name'}
image_service = object()
image_id = 'image_id'
drv._client = mock.Mock()
drv._client.get_api_version = mock.Mock(return_value=(1, 20))
drv._try_copyoffload = mock.Mock()
drv._get_provider_location = mock.Mock(return_value='share')
drv._get_vol_for_share = mock.Mock(return_value='vol')
drv._update_stale_vols = mock.Mock()
drv.copy_image_to_volume(context, volume, image_service, image_id)
drv._try_copyoffload.assert_called_once_with(context, volume,
image_service,
image_id)
drv._update_stale_vols.assert_called_once_with('vol')
def test_copy_img_to_vol_copyoffload_failure(self):
drv = self._driver
context = object()
volume = {'id': 'vol_id', 'name': 'name'}
image_service = object()
image_id = 'image_id'
drv._client = mock.Mock()
drv._client.get_api_version = mock.Mock(return_value=(1, 20))
drv._try_copyoffload = mock.Mock(side_effect=Exception())
netapp_nfs.NetAppNFSDriver.copy_image_to_volume = mock.Mock()
drv._get_provider_location = mock.Mock(return_value='share')
drv._get_vol_for_share = mock.Mock(return_value='vol')
drv._update_stale_vols = mock.Mock()
drv.copy_image_to_volume(context, volume, image_service, image_id)
drv._try_copyoffload.assert_called_once_with(context, volume,
image_service,
image_id)
netapp_nfs.NetAppNFSDriver.copy_image_to_volume.\
assert_called_once_with(context, volume, image_service, image_id)
drv._update_stale_vols.assert_called_once_with('vol')
def test_copy_img_to_vol_copyoffload_nonexistent_binary_path(self):
drv = self._driver
context = object()
volume = {'id': 'vol_id', 'name': 'name'}
image_service = mock.Mock()
image_service.get_location.return_value = (mock.Mock(), mock.Mock())
image_service.show.return_value = {'size': 0}
image_id = 'image_id'
drv._client = mock.Mock()
drv._client.get_api_version = mock.Mock(return_value=(1, 20))
drv._find_image_in_cache = mock.Mock(return_value=[])
drv._construct_image_nfs_url = mock.Mock(return_value="")
drv._check_get_nfs_path_segs = mock.Mock(return_value=("test:test",
"dr"))
drv._get_ip_verify_on_cluster = mock.Mock(return_value="192.1268.1.1")
drv._get_mount_point_for_share = mock.Mock(return_value='mnt_point')
drv._get_host_ip = mock.Mock()
drv._get_provider_location = mock.Mock()
drv._get_export_path = mock.Mock(return_value="dr")
drv._check_share_can_hold_size = mock.Mock()
        # Raise an error as if the copyoffload file cannot be found
drv._clone_file_dst_exists = mock.Mock(side_effect=OSError())
# Verify the original error is propagated
self.assertRaises(OSError, drv._try_copyoffload,
context, volume, image_service, image_id)
def test_copyoffload_frm_cache_success(self):
drv = self._driver
context = object()
volume = {'id': 'vol_id', 'name': 'name'}
image_service = object()
image_id = 'image_id'
drv._find_image_in_cache = mock.Mock(return_value=[('share', 'img')])
drv._copy_from_cache = mock.Mock(return_value=True)
drv._try_copyoffload(context, volume, image_service, image_id)
drv._copy_from_cache.assert_called_once_with(volume,
image_id,
[('share', 'img')])
def test_copyoffload_frm_img_service_success(self):
drv = self._driver
context = object()
volume = {'id': 'vol_id', 'name': 'name'}
image_service = object()
image_id = 'image_id'
drv._client = mock.Mock()
drv._client.get_api_version = mock.Mock(return_value=(1, 20))
drv._find_image_in_cache = mock.Mock(return_value=[])
drv._copy_from_img_service = mock.Mock()
drv._try_copyoffload(context, volume, image_service, image_id)
drv._copy_from_img_service.assert_called_once_with(context,
volume,
image_service,
image_id)
def test_cache_copyoffload_workflow_success(self):
drv = self._driver
volume = {'id': 'vol_id', 'name': 'name', 'size': 1}
image_id = 'image_id'
cache_result = [('ip1:/openstack', 'img-cache-imgid')]
drv._get_ip_verify_on_cluster = mock.Mock(return_value='ip1')
drv._get_host_ip = mock.Mock(return_value='ip2')
drv._get_export_path = mock.Mock(return_value='/exp_path')
drv._execute = mock.Mock()
drv._register_image_in_cache = mock.Mock()
drv._get_provider_location = mock.Mock(return_value='/share')
drv._post_clone_image = mock.Mock()
copied = drv._copy_from_cache(volume, image_id, cache_result)
self.assertTrue(copied)
drv._get_ip_verify_on_cluster.assert_any_call('ip1')
drv._get_export_path.assert_called_with('vol_id')
drv._execute.assert_called_once_with('cof_path', 'ip1', 'ip1',
'/openstack/img-cache-imgid',
'/exp_path/name',
run_as_root=False,
check_exit_code=0)
drv._post_clone_image.assert_called_with(volume)
drv._get_provider_location.assert_called_with('vol_id')
@mock.patch.object(image_utils, 'qemu_img_info')
def test_img_service_raw_copyoffload_workflow_success(self,
mock_qemu_img_info):
drv = self._driver
volume = {'id': 'vol_id', 'name': 'name', 'size': 1}
image_id = 'image_id'
context = object()
image_service = mock.Mock()
image_service.get_location.return_value = ('nfs://ip1/openstack/img',
None)
image_service.show.return_value = {'size': 1,
'disk_format': 'raw'}
drv._check_get_nfs_path_segs = mock.Mock(return_value=
('ip1', '/openstack'))
drv._get_ip_verify_on_cluster = mock.Mock(return_value='ip1')
drv._get_host_ip = mock.Mock(return_value='ip2')
drv._get_export_path = mock.Mock(return_value='/exp_path')
drv._get_provider_location = mock.Mock(return_value='share')
drv._execute = mock.Mock()
drv._get_mount_point_for_share = mock.Mock(return_value='mnt_point')
drv._discover_file_till_timeout = mock.Mock(return_value=True)
img_inf = mock.Mock()
img_inf.file_format = 'raw'
mock_qemu_img_info.return_value = img_inf
drv._check_share_can_hold_size = mock.Mock()
drv._move_nfs_file = mock.Mock(return_value=True)
drv._delete_file = mock.Mock()
drv._clone_file_dst_exists = mock.Mock()
drv._post_clone_image = mock.Mock()
drv._copy_from_img_service(context, volume, image_service, image_id)
drv._get_ip_verify_on_cluster.assert_any_call('ip1')
drv._get_export_path.assert_called_with('vol_id')
drv._check_share_can_hold_size.assert_called_with('share', 1)
assert drv._execute.call_count == 1
drv._post_clone_image.assert_called_with(volume)
@mock.patch.object(image_utils, 'convert_image')
@mock.patch.object(image_utils, 'qemu_img_info')
@mock.patch('os.path.exists')
def test_img_service_qcow2_copyoffload_workflow_success(self, mock_exists,
mock_qemu_img_info,
mock_cvrt_image):
drv = self._driver
volume = {'id': 'vol_id', 'name': 'name', 'size': 1}
image_id = 'image_id'
context = object()
image_service = mock.Mock()
image_service.get_location.return_value = ('nfs://ip1/openstack/img',
None)
image_service.show.return_value = {'size': 1,
'disk_format': 'qcow2'}
drv._check_get_nfs_path_segs = mock.Mock(return_value=
('ip1', '/openstack'))
drv._get_ip_verify_on_cluster = mock.Mock(return_value='ip1')
drv._get_host_ip = mock.Mock(return_value='ip2')
drv._get_export_path = mock.Mock(return_value='/exp_path')
drv._get_provider_location = mock.Mock(return_value='share')
drv._execute = mock.Mock()
drv._get_mount_point_for_share = mock.Mock(return_value='mnt_point')
img_inf = mock.Mock()
img_inf.file_format = 'raw'
mock_qemu_img_info.return_value = img_inf
drv._check_share_can_hold_size = mock.Mock()
drv._move_nfs_file = mock.Mock(return_value=True)
drv._delete_file = mock.Mock()
drv._clone_file_dst_exists = mock.Mock()
drv._post_clone_image = mock.Mock()
drv._copy_from_img_service(context, volume, image_service, image_id)
drv._get_ip_verify_on_cluster.assert_any_call('ip1')
drv._get_export_path.assert_called_with('vol_id')
drv._check_share_can_hold_size.assert_called_with('share', 1)
assert mock_cvrt_image.call_count == 1
assert drv._execute.call_count == 1
assert drv._delete_file.call_count == 2
        assert drv._clone_file_dst_exists.call_count == 1
drv._post_clone_image.assert_called_with(volume)
class NetappDirect7modeNfsDriverTestCase(NetappDirectCmodeNfsDriverTestCase):
"""Test direct NetApp C Mode driver."""
def _custom_setup(self):
self._driver = netapp_nfs.NetAppDirect7modeNfsDriver(
configuration=create_configuration())
def _prepare_delete_snapshot_mock(self, snapshot_exists):
drv = self._driver
mox = self.mox
mox.StubOutWithMock(drv, '_get_provider_location')
mox.StubOutWithMock(drv, '_volume_not_present')
if snapshot_exists:
mox.StubOutWithMock(drv, '_execute')
mox.StubOutWithMock(drv, '_get_volume_path')
drv._get_provider_location(IgnoreArg())
drv._volume_not_present(IgnoreArg(), IgnoreArg())\
.AndReturn(not snapshot_exists)
if snapshot_exists:
drv._get_volume_path(IgnoreArg(), IgnoreArg())
drv._execute('rm', None, run_as_root=True)
mox.ReplayAll()
return mox
def test_check_for_setup_error_version(self):
drv = self._driver
drv._client = api.NaServer("127.0.0.1")
        # check that an exception is raised when the version is not found
self.assertRaises(exception.VolumeBackendAPIException,
drv.check_for_setup_error)
drv._client.set_api_version(1, 8)
        # check that an exception is raised for an unsupported version
self.assertRaises(exception.VolumeBackendAPIException,
drv.check_for_setup_error)
def test_check_for_setup_error(self):
mox = self.mox
drv = self._driver
drv._client = api.NaServer("127.0.0.1")
drv._client.set_api_version(1, 9)
required_flags = [
'netapp_transport_type',
'netapp_login',
'netapp_password',
'netapp_server_hostname',
'netapp_server_port']
        # unset required flags
for flag in required_flags:
setattr(drv.configuration, flag, None)
        # check that an exception is raised when flags are not set
self.assertRaises(exception.CinderException,
drv.check_for_setup_error)
# set required flags
for flag in required_flags:
setattr(drv.configuration, flag, 'val')
mox.ReplayAll()
drv.check_for_setup_error()
mox.VerifyAll()
# restore initial FLAGS
for flag in required_flags:
delattr(drv.configuration, flag)
def test_do_setup(self):
mox = self.mox
drv = self._driver
mox.StubOutWithMock(netapp_nfs.NetAppNFSDriver, 'do_setup')
mox.StubOutWithMock(drv, '_get_client')
mox.StubOutWithMock(drv, '_do_custom_setup')
netapp_nfs.NetAppNFSDriver.do_setup(IgnoreArg())
drv._get_client()
drv._do_custom_setup(IgnoreArg())
mox.ReplayAll()
drv.do_setup(IsA(context.RequestContext))
mox.VerifyAll()
def _prepare_clone_mock(self, status):
drv = self._driver
mox = self.mox
volume = FakeVolume()
setattr(volume, 'provider_location', '127.0.0.1:/nfs')
mox.StubOutWithMock(drv, '_get_export_ip_path')
mox.StubOutWithMock(drv, '_get_actual_path_for_export')
mox.StubOutWithMock(drv, '_start_clone')
mox.StubOutWithMock(drv, '_wait_for_clone_finish')
if status == 'fail':
mox.StubOutWithMock(drv, '_clear_clone')
drv._get_export_ip_path(
IgnoreArg(), IgnoreArg()).AndReturn(('127.0.0.1', '/nfs'))
drv._get_actual_path_for_export(IgnoreArg()).AndReturn('/vol/vol1/nfs')
drv._start_clone(IgnoreArg(), IgnoreArg()).AndReturn(('1', '2'))
if status == 'fail':
drv._wait_for_clone_finish('1', '2').AndRaise(
api.NaApiError('error', 'error'))
drv._clear_clone('1')
else:
drv._wait_for_clone_finish('1', '2')
return mox
def test_clone_volume_clear(self):
drv = self._driver
mox = self._prepare_clone_mock('fail')
mox.ReplayAll()
volume_name = 'volume_name'
clone_name = 'clone_name'
volume_id = volume_name + str(hash(volume_name))
try:
drv._clone_volume(volume_name, clone_name, volume_id)
except Exception as e:
if isinstance(e, api.NaApiError):
pass
else:
raise
mox.VerifyAll()
| github-borat/cinder | cinder/tests/test_netapp_nfs.py | Python | apache-2.0 | 47,799 |
/* Copyright (c) The m-m-m Team, Licensed under the Apache License, Version 2.0
* http://www.apache.org/licenses/LICENSE-2.0 */
package net.sf.mmm.service.base.client;
import net.sf.mmm.service.api.RemoteInvocationCall;
import net.sf.mmm.util.lang.api.function.Consumer;
/**
* This is a simple container for the data corresponding to a {@link RemoteInvocationCall}.
*
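 * A construction sketch (the result type {@code MyResult} and the callback
 * bodies are illustrative only, not part of this API):
 * <pre>{@code
 * RemoteInvocationCallData<MyResult, RemoteInvocationCall> data =
 *     new RemoteInvocationCallData<MyResult, RemoteInvocationCall>(
 *         new Consumer<MyResult>() {
 *             public void accept(MyResult result) {
 *                 // consume the service result
 *             }
 *         }, new Consumer<Throwable>() {
 *             public void accept(Throwable failure) {
 *                 // report the failure
 *             }
 *         });
 * data.setCall(call); // call is created by the service caller
 * }</pre>
 *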
* @param <RESULT> is the generic type of the method return-type.
* @param <CALL> is the generic type of the {@link #getCall() call} data.
* @author Joerg Hohwiller (hohwille at users.sourceforge.net)
* @since 1.0.0
*/
public class RemoteInvocationCallData<RESULT, CALL extends RemoteInvocationCall> {
/** The callback to receive the service result on success. */
private final Consumer<? extends RESULT> successCallback;
/** The callback to receive a potential service failure. */
private final Consumer<Throwable> failureCallback;
/** @see #getCall() */
private CALL call;
/**
* The constructor.
*
* @param successCallback is the callback that {@link Consumer#accept(Object) receives} the result on
* success.
* @param failureCallback is the callback that {@link Consumer#accept(Object) receives} the failure on
* error.
*/
public RemoteInvocationCallData(Consumer<? extends RESULT> successCallback, Consumer<Throwable> failureCallback) {
super();
this.successCallback = successCallback;
this.failureCallback = failureCallback;
}
/**
* @return the successCallback.
*/
public Consumer<? extends RESULT> getSuccessCallback() {
return this.successCallback;
}
/**
* @return the failureCallback.
*/
public Consumer<Throwable> getFailureCallback() {
return this.failureCallback;
}
/**
* @return the actual call data (either {@link net.sf.mmm.service.api.command.RemoteInvocationCommand}
* itself or {@link net.sf.mmm.service.base.rpc.GenericRemoteInvocationRpcCall}).
*/
public CALL getCall() {
return this.call;
}
/**
* @param call is the new value of {@link #getCall()}.
*/
public void setCall(CALL call) {
assert (this.call == null);
assert (call != null);
this.call = call;
}
}
| m-m-m/service | base/src/main/java/net/sf/mmm/service/base/client/RemoteInvocationCallData.java | Java | apache-2.0 | 2,216 |
/**
*
*/
package jframe.core.plugin;
import java.util.EventListener;
/**
* @author dzh
* @date Sep 12, 2013 9:42:33 PM
* @since 1.0
*/
public interface PluginListener extends EventListener {
void pluginChanged(PluginEvent event);
}
| dzh/jframe | jframe/jframe-core/src/main/java/jframe/core/plugin/PluginListener.java | Java | apache-2.0 | 244 |
/*
* Created on May 17, 2004
*
* Paros and its related class files.
*
* Paros is an HTTP/HTTPS proxy for assessing web application security.
* Copyright (C) 2003-2004 Chinotec Technologies Company
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the Clarified Artistic License
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* Clarified Artistic License for more details.
*
* You should have received a copy of the Clarified Artistic License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
// ZAP: 2013/01/16 Minor fix to prevent NPE
// ZAP: 2014/10/17 Issue 1308: Updated for latest icons
// ZAP: 2015/02/10 Issue 1528: Support user defined font size
// ZAP: 2015/09/07 Move icon loading to a utility class
package org.parosproxy.paros.view;
import java.awt.Dimension;
import java.awt.Frame;
import java.awt.Image;
import java.awt.Point;
import java.awt.Toolkit;
import java.awt.event.ComponentAdapter;
import java.awt.event.ComponentEvent;
import java.awt.event.WindowEvent;
import java.awt.event.WindowStateListener;
import java.util.ArrayList;
import java.util.List;
import java.util.prefs.BackingStoreException;
import java.util.prefs.Preferences;
import javax.swing.JFrame;
import org.apache.log4j.Logger;
import org.parosproxy.paros.Constant;
import org.zaproxy.zap.utils.DisplayUtils;
/**
* Generic Frame, which handles some basic properties.
* <ul>
* <li>Sets the icon(s) for the frame, which are the ZAP icons</li>
* <li>Centers the frame on screen</li>
* <li>Sets the frame to _not_ visible</li>
* <li>Sets a common font for the frame</li>
* <li>Sets a default title (ZAP application name)</li>
 * <li>Preserves window state, location and size correctly (will survive multiple sessions)</li>
* </ul>
* Hint for implementers: If you use this class,
* don't use {@link #setSize(Dimension)}, but {@link #setPreferredSize(Dimension)}
* instead. Also, don't use {@link #setLocation(Point)}. This abstract class
* will automatically take care of size and position.
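 * <p>
 * A minimal subclass sketch following these hints (the class and component
 * names are illustrative only, not part of this API):
 * <pre>{@code
 * public class ExampleFrame extends AbstractFrame {
 *     public ExampleFrame() {
 *         super(); // icons, title and saved size/position are applied here
 *         setPreferredSize(new Dimension(640, 480)); // not setSize()
 *         getContentPane().add(new JLabel("Hello"));
 *     }
 * }
 * }</pre>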
*/
public abstract class AbstractFrame extends JFrame {
private static final long serialVersionUID = 6751593232255236597L;
private static final String PREF_WINDOW_STATE = "window.state";
private static final String PREF_WINDOW_SIZE = "window.size";
private static final String PREF_WINDOW_POSITION = "window.position";
private static final int WINDOW_DEFAULT_WIDTH = 800;
private static final int WINDOW_DEFAULT_HEIGHT = 600;
/**
* Hint: Preferences are only saved by package.
* We have to use a prefix for separation.
*/
private final Preferences preferences;
private final String prefnzPrefix = this.getClass().getSimpleName()+".";
private final Logger logger = Logger.getLogger(AbstractFrame.class);
/**
* This is the default constructor
*/
public AbstractFrame() {
super();
this.preferences = Preferences.userNodeForPackage(getClass());
initialize();
}
/**
	 * This method initializes this frame.
*/
private void initialize() {
// ZAP: Rebrand
this.setIconImages(DisplayUtils.getZapIconImages());
this.setVisible(false);
this.setTitle(Constant.PROGRAM_NAME);
final Dimension dim = restoreWindowSize();
if (dim == null) {
this.setSize(WINDOW_DEFAULT_WIDTH, WINDOW_DEFAULT_HEIGHT);
}
final Point point = restoreWindowLocation();
if (point == null) {
centerFrame();
}
restoreWindowState();
this.addWindowStateListener(new FrameWindowStateListener());
this.addComponentListener(new FrameResizedListener());
}
/**
* Centre this frame.
*
*/
public void centerFrame() {
final Dimension screenSize = Toolkit.getDefaultToolkit().getScreenSize();
final Dimension frameSize = this.getSize();
if (frameSize.height > screenSize.height) {
frameSize.height = screenSize.height;
}
if (frameSize.width > screenSize.width) {
frameSize.width = screenSize.width;
}
this.setLocation((screenSize.width - frameSize.width) / 2, (screenSize.height - frameSize.height) / 2);
}
/**
* @param windowstate integer value, see {@link JFrame#getExtendedState()}
*/
private void saveWindowState(int windowstate) {
if ((windowstate & Frame.ICONIFIED) == Frame.ICONIFIED) {
preferences.put(prefnzPrefix+PREF_WINDOW_STATE, SimpleWindowState.ICONFIED.toString());
if (logger.isDebugEnabled()) logger.debug("Saving preference "+PREF_WINDOW_STATE+"=" + SimpleWindowState.ICONFIED);
}
if ((windowstate & Frame.MAXIMIZED_BOTH) == Frame.MAXIMIZED_BOTH) {
preferences.put(prefnzPrefix+PREF_WINDOW_STATE, SimpleWindowState.MAXIMIZED.toString());
if (logger.isDebugEnabled()) logger.debug("Saving preference "+PREF_WINDOW_STATE+"=" + SimpleWindowState.MAXIMIZED);
}
		if (windowstate == Frame.NORMAL) { // hint: Frame.NORMAL = 0, that's why no masking
preferences.put(prefnzPrefix+PREF_WINDOW_STATE, SimpleWindowState.NORMAL.toString());
if (logger.isDebugEnabled()) logger.debug("Saving preference "+PREF_WINDOW_STATE+"=" + SimpleWindowState.NORMAL);
}
}
/**
* Loads and sets the last window state of the frame.
* Additionally, the last state will be returned.
*
* @return last window state OR null
*/
private SimpleWindowState restoreWindowState() {
SimpleWindowState laststate = null;
final String statestr = preferences.get(prefnzPrefix+PREF_WINDOW_STATE, null);
if (logger.isDebugEnabled()) logger.debug("Restoring preference "+PREF_WINDOW_STATE+"=" + statestr);
if (statestr != null) {
SimpleWindowState state = null;
try {
state = SimpleWindowState.valueOf(statestr);
} catch (final IllegalArgumentException e) { state = null; }
if (state != null) {
switch (state) {
case ICONFIED: this.setExtendedState(Frame.ICONIFIED); break;
case NORMAL: this.setExtendedState(Frame.NORMAL); break;
case MAXIMIZED: this.setExtendedState(Frame.MAXIMIZED_BOTH); break;
default:
logger.error("Invalid window state (nothing will changed): " + statestr);
}
}
laststate = state;
}
return laststate;
}
/**
	 * Saves the size of this frame, but only if the window state is 'normal'.
	 * If the window state is iconified or maximized, the size is not saved!
	 *
	 * @param size the current frame size
*/
private void saveWindowSize(Dimension size) {
if (size != null) {
if (getExtendedState() == Frame.NORMAL) {
if (logger.isDebugEnabled()) logger.debug("Saving preference " + PREF_WINDOW_SIZE + "=" + size.width + "," + size.height);
this.preferences.put(prefnzPrefix+PREF_WINDOW_SIZE, size.width + "," + size.height);
} else {
if (logger.isDebugEnabled()) logger.debug("Preference " + PREF_WINDOW_SIZE + " not saved, cause window state is not 'normal'.");
}
}
}
/**
	 * Loads and sets the saved size preferences for this frame.
	 *
	 * @return the size of the frame, or null if there wasn't any preference.
*/
private Dimension restoreWindowSize() {
Dimension result = null;
final String sizestr = preferences.get(prefnzPrefix+PREF_WINDOW_SIZE, null);
if (sizestr != null) {
int width = 0;
int height = 0;
final String[] sizes = sizestr.split("[,]");
try {
width = Integer.parseInt(sizes[0].trim());
height = Integer.parseInt(sizes[1].trim());
} catch (final Exception e) {
				// ignored: width/height stay 0 and the guard below skips invalid values
}
if (width > 0 && height > 0) {
result = new Dimension(width, height);
if (logger.isDebugEnabled()) logger.debug("Restoring preference " + PREF_WINDOW_SIZE + "=" + result.width + "," + result.height);
this.setSize(result);
}
}
return result;
}
/**
	 * Saves the location of this frame, but only if the window state is 'normal'.
	 * If the window state is iconified or maximized, the location is not saved!
	 *
	 * @param point the current frame location
*/
private void saveWindowLocation(Point point) {
if (point != null) {
if (getExtendedState() == Frame.NORMAL) {
if (logger.isDebugEnabled()) logger.debug("Saving preference " + PREF_WINDOW_POSITION + "=" + point.x + "," + point.y);
this.preferences.put(prefnzPrefix+PREF_WINDOW_POSITION, point.x + "," + point.y);
} else {
if (logger.isDebugEnabled()) logger.debug("Preference " + PREF_WINDOW_POSITION + " not saved, cause window state is not 'normal'.");
}
}
}
/**
	 * Loads and sets the saved position preferences for this frame.
	 *
	 * @return the location of the frame, or null if there wasn't any preference.
*/
private Point restoreWindowLocation() {
Point result = null;
final String sizestr = preferences.get(prefnzPrefix+PREF_WINDOW_POSITION, null);
if (sizestr != null) {
int x = 0;
int y = 0;
final String[] sizes = sizestr.split("[,]");
try {
x = Integer.parseInt(sizes[0].trim());
y = Integer.parseInt(sizes[1].trim());
} catch (final Exception e) {
				// ignored: x/y stay 0 and the guard below skips invalid values
}
if (x > 0 && y > 0) {
result = new Point(x, y);
if (logger.isDebugEnabled()) logger.debug("Restoring preference " + PREF_WINDOW_POSITION + "=" + result.x + "," + result.y);
this.setLocation(result);
}
}
return result;
}
/**
* @deprecated (2.4.2) Use {@link DisplayUtils#getZapIconImages()} instead. It will be removed in a future release.
*/
@Deprecated
@SuppressWarnings("javadoc")
protected List<Image> loadIconImages() {
return new ArrayList<>(DisplayUtils.getZapIconImages());
}
@Override
public void dispose() {
super.dispose();
try {
this.preferences.flush();
} catch (final BackingStoreException e) {
logger.error("Error while saving the preferences", e);
}
}
/*
* ========================================================================
*/
private final class FrameWindowStateListener implements WindowStateListener {
@Override
public void windowStateChanged(WindowEvent e) {
saveWindowState(e.getNewState());
}
}
private final class FrameResizedListener extends ComponentAdapter {
@Override
public void componentResized(ComponentEvent e) {
if (e.getComponent() != null) {
saveWindowSize(e.getComponent().getSize());
}
}
@Override
public void componentMoved(ComponentEvent e) {
if (e.getComponent() != null) {
saveWindowLocation(e.getComponent().getLocation());
}
}
}
/**
* Simplified version for easier handling of the states ...
*/
private enum SimpleWindowState {
ICONFIED,
NORMAL,
MAXIMIZED;
}
} // @jve:visual-info decl-index=0 visual-constraint="31,17"
| GillesMoris/OSS | src/org/parosproxy/paros/view/AbstractFrame.java | Java | apache-2.0 | 10,722 |
/**
* FreeRDP: A Remote Desktop Protocol Client
* RDP Security
*
* Copyright 2011 Marc-Andre Moreau <[email protected]>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "security.h"
/* 0x36 repeated 40 times */
static const uint8 pad1[40] =
{
"\x36\x36\x36\x36\x36\x36\x36\x36"
"\x36\x36\x36\x36\x36\x36\x36\x36"
"\x36\x36\x36\x36\x36\x36\x36\x36"
"\x36\x36\x36\x36\x36\x36\x36\x36"
"\x36\x36\x36\x36\x36\x36\x36\x36"
};
/* 0x5C repeated 48 times */
static const uint8 pad2[48] =
{
"\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C"
"\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C"
"\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C"
"\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C"
"\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C"
"\x5C\x5C\x5C\x5C\x5C\x5C\x5C\x5C"
};
static const uint8
fips_reverse_table[256] =
{
0x00, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0,
0x10, 0x90, 0x50, 0xd0, 0x30, 0xb0, 0x70, 0xf0,
0x08, 0x88, 0x48, 0xc8, 0x28, 0xa8, 0x68, 0xe8,
0x18, 0x98, 0x58, 0xd8, 0x38, 0xb8, 0x78, 0xf8,
0x04, 0x84, 0x44, 0xc4, 0x24, 0xa4, 0x64, 0xe4,
0x14, 0x94, 0x54, 0xd4, 0x34, 0xb4, 0x74, 0xf4,
0x0c, 0x8c, 0x4c, 0xcc, 0x2c, 0xac, 0x6c, 0xec,
0x1c, 0x9c, 0x5c, 0xdc, 0x3c, 0xbc, 0x7c, 0xfc,
0x02, 0x82, 0x42, 0xc2, 0x22, 0xa2, 0x62, 0xe2,
0x12, 0x92, 0x52, 0xd2, 0x32, 0xb2, 0x72, 0xf2,
0x0a, 0x8a, 0x4a, 0xca, 0x2a, 0xaa, 0x6a, 0xea,
0x1a, 0x9a, 0x5a, 0xda, 0x3a, 0xba, 0x7a, 0xfa,
0x06, 0x86, 0x46, 0xc6, 0x26, 0xa6, 0x66, 0xe6,
0x16, 0x96, 0x56, 0xd6, 0x36, 0xb6, 0x76, 0xf6,
0x0e, 0x8e, 0x4e, 0xce, 0x2e, 0xae, 0x6e, 0xee,
0x1e, 0x9e, 0x5e, 0xde, 0x3e, 0xbe, 0x7e, 0xfe,
0x01, 0x81, 0x41, 0xc1, 0x21, 0xa1, 0x61, 0xe1,
0x11, 0x91, 0x51, 0xd1, 0x31, 0xb1, 0x71, 0xf1,
0x09, 0x89, 0x49, 0xc9, 0x29, 0xa9, 0x69, 0xe9,
0x19, 0x99, 0x59, 0xd9, 0x39, 0xb9, 0x79, 0xf9,
0x05, 0x85, 0x45, 0xc5, 0x25, 0xa5, 0x65, 0xe5,
0x15, 0x95, 0x55, 0xd5, 0x35, 0xb5, 0x75, 0xf5,
0x0d, 0x8d, 0x4d, 0xcd, 0x2d, 0xad, 0x6d, 0xed,
0x1d, 0x9d, 0x5d, 0xdd, 0x3d, 0xbd, 0x7d, 0xfd,
0x03, 0x83, 0x43, 0xc3, 0x23, 0xa3, 0x63, 0xe3,
0x13, 0x93, 0x53, 0xd3, 0x33, 0xb3, 0x73, 0xf3,
0x0b, 0x8b, 0x4b, 0xcb, 0x2b, 0xab, 0x6b, 0xeb,
0x1b, 0x9b, 0x5b, 0xdb, 0x3b, 0xbb, 0x7b, 0xfb,
0x07, 0x87, 0x47, 0xc7, 0x27, 0xa7, 0x67, 0xe7,
0x17, 0x97, 0x57, 0xd7, 0x37, 0xb7, 0x77, 0xf7,
0x0f, 0x8f, 0x4f, 0xcf, 0x2f, 0xaf, 0x6f, 0xef,
0x1f, 0x9f, 0x5f, 0xdf, 0x3f, 0xbf, 0x7f, 0xff
};
static const uint8
fips_oddparity_table[256] =
{
0x01, 0x01, 0x02, 0x02, 0x04, 0x04, 0x07, 0x07,
0x08, 0x08, 0x0b, 0x0b, 0x0d, 0x0d, 0x0e, 0x0e,
0x10, 0x10, 0x13, 0x13, 0x15, 0x15, 0x16, 0x16,
0x19, 0x19, 0x1a, 0x1a, 0x1c, 0x1c, 0x1f, 0x1f,
0x20, 0x20, 0x23, 0x23, 0x25, 0x25, 0x26, 0x26,
0x29, 0x29, 0x2a, 0x2a, 0x2c, 0x2c, 0x2f, 0x2f,
0x31, 0x31, 0x32, 0x32, 0x34, 0x34, 0x37, 0x37,
0x38, 0x38, 0x3b, 0x3b, 0x3d, 0x3d, 0x3e, 0x3e,
0x40, 0x40, 0x43, 0x43, 0x45, 0x45, 0x46, 0x46,
0x49, 0x49, 0x4a, 0x4a, 0x4c, 0x4c, 0x4f, 0x4f,
0x51, 0x51, 0x52, 0x52, 0x54, 0x54, 0x57, 0x57,
0x58, 0x58, 0x5b, 0x5b, 0x5d, 0x5d, 0x5e, 0x5e,
0x61, 0x61, 0x62, 0x62, 0x64, 0x64, 0x67, 0x67,
0x68, 0x68, 0x6b, 0x6b, 0x6d, 0x6d, 0x6e, 0x6e,
0x70, 0x70, 0x73, 0x73, 0x75, 0x75, 0x76, 0x76,
0x79, 0x79, 0x7a, 0x7a, 0x7c, 0x7c, 0x7f, 0x7f,
0x80, 0x80, 0x83, 0x83, 0x85, 0x85, 0x86, 0x86,
0x89, 0x89, 0x8a, 0x8a, 0x8c, 0x8c, 0x8f, 0x8f,
0x91, 0x91, 0x92, 0x92, 0x94, 0x94, 0x97, 0x97,
0x98, 0x98, 0x9b, 0x9b, 0x9d, 0x9d, 0x9e, 0x9e,
0xa1, 0xa1, 0xa2, 0xa2, 0xa4, 0xa4, 0xa7, 0xa7,
0xa8, 0xa8, 0xab, 0xab, 0xad, 0xad, 0xae, 0xae,
0xb0, 0xb0, 0xb3, 0xb3, 0xb5, 0xb5, 0xb6, 0xb6,
0xb9, 0xb9, 0xba, 0xba, 0xbc, 0xbc, 0xbf, 0xbf,
0xc1, 0xc1, 0xc2, 0xc2, 0xc4, 0xc4, 0xc7, 0xc7,
0xc8, 0xc8, 0xcb, 0xcb, 0xcd, 0xcd, 0xce, 0xce,
0xd0, 0xd0, 0xd3, 0xd3, 0xd5, 0xd5, 0xd6, 0xd6,
0xd9, 0xd9, 0xda, 0xda, 0xdc, 0xdc, 0xdf, 0xdf,
0xe0, 0xe0, 0xe3, 0xe3, 0xe5, 0xe5, 0xe6, 0xe6,
0xe9, 0xe9, 0xea, 0xea, 0xec, 0xec, 0xef, 0xef,
0xf1, 0xf1, 0xf2, 0xf2, 0xf4, 0xf4, 0xf7, 0xf7,
0xf8, 0xf8, 0xfb, 0xfb, 0xfd, 0xfd, 0xfe, 0xfe
};
static void security_salted_hash(uint8* salt, uint8* input, int length, uint8* salt1, uint8* salt2, uint8* output)
{
CryptoMd5 md5;
CryptoSha1 sha1;
uint8 sha1_digest[CRYPTO_SHA1_DIGEST_LENGTH];
	/* SaltedHash(Salt, Input, Salt1, Salt2) = MD5(Salt + SHA1(Input + Salt + Salt1 + Salt2)) */
/* SHA1_Digest = SHA1(Input + Salt + Salt1 + Salt2) */
sha1 = crypto_sha1_init();
crypto_sha1_update(sha1, input, length); /* Input */
crypto_sha1_update(sha1, salt, 48); /* Salt (48 bytes) */
crypto_sha1_update(sha1, salt1, 32); /* Salt1 (32 bytes) */
crypto_sha1_update(sha1, salt2, 32); /* Salt2 (32 bytes) */
crypto_sha1_final(sha1, sha1_digest);
	/* SaltedHash(Salt, Input, Salt1, Salt2) = MD5(Salt + SHA1_Digest) */
md5 = crypto_md5_init();
crypto_md5_update(md5, salt, 48); /* Salt (48 bytes) */
crypto_md5_update(md5, sha1_digest, sizeof(sha1_digest)); /* SHA1_Digest */
crypto_md5_final(md5, output);
}
static void security_premaster_hash(char* input, int length, uint8* premaster_secret, uint8* client_random, uint8* server_random, uint8* output)
{
/* PremasterHash(Input) = SaltedHash(PremasterSecret, Input, ClientRandom, ServerRandom) */
security_salted_hash(premaster_secret, (uint8*)input, length, client_random, server_random, output);
}
void security_master_secret(uint8* premaster_secret, uint8* client_random, uint8* server_random, uint8* output)
{
/* MasterSecret = PremasterHash('A') + PremasterHash('BB') + PremasterHash('CCC') */
security_premaster_hash("A", 1, premaster_secret, client_random, server_random, &output[0]);
security_premaster_hash("BB", 2, premaster_secret, client_random, server_random, &output[16]);
security_premaster_hash("CCC", 3, premaster_secret, client_random, server_random, &output[32]);
}
static void security_master_hash(char* input, int length, uint8* master_secret, uint8* client_random, uint8* server_random, uint8* output)
{
/* MasterHash(Input) = SaltedHash(MasterSecret, Input, ServerRandom, ClientRandom) */
security_salted_hash(master_secret, (uint8*)input, length, server_random, client_random, output);
}
void security_session_key_blob(uint8* master_secret, uint8* client_random, uint8* server_random, uint8* output)
{
	/* SessionKeyBlob = MasterHash('A') + MasterHash('BB') + MasterHash('CCC') */
security_master_hash("A", 1, master_secret, client_random, server_random, &output[0]);
security_master_hash("BB", 2, master_secret, client_random, server_random, &output[16]);
security_master_hash("CCC", 3, master_secret, client_random, server_random, &output[32]);
}
void security_mac_salt_key(uint8* session_key_blob, uint8* client_random, uint8* server_random, uint8* output)
{
/* MacSaltKey = First128Bits(SessionKeyBlob) */
memcpy(output, session_key_blob, 16);
}
void security_md5_16_32_32(uint8* in0, uint8* in1, uint8* in2, uint8* output)
{
CryptoMd5 md5;
md5 = crypto_md5_init();
crypto_md5_update(md5, in0, 16);
crypto_md5_update(md5, in1, 32);
crypto_md5_update(md5, in2, 32);
crypto_md5_final(md5, output);
}
void security_licensing_encryption_key(uint8* session_key_blob, uint8* client_random, uint8* server_random, uint8* output)
{
/* LicensingEncryptionKey = MD5(Second128Bits(SessionKeyBlob) + ClientRandom + ServerRandom)) */
security_md5_16_32_32(&session_key_blob[16], client_random, server_random, output);
}
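/*
 * Illustration (not part of the original file): a hedged sketch of chaining
 * the helpers above to derive the licensing keys from a premaster secret.
 * The function and buffer names are illustrative; it is compiled out via #if 0.
 */
#if 0
static void example_derive_license_keys(uint8* premaster_secret,
	uint8* client_random, uint8* server_random,
	uint8* mac_salt_key, uint8* license_key)
{
	uint8 master_secret[48];
	uint8 session_key_blob[48];
	/* MasterSecret = PremasterHash('A') + PremasterHash('BB') + PremasterHash('CCC') */
	security_master_secret(premaster_secret, client_random, server_random, master_secret);
	/* SessionKeyBlob = MasterHash('A') + MasterHash('BB') + MasterHash('CCC') */
	security_session_key_blob(master_secret, client_random, server_random, session_key_blob);
	/* MacSaltKey = First128Bits(SessionKeyBlob) */
	security_mac_salt_key(session_key_blob, client_random, server_random, mac_salt_key);
	/* LicensingEncryptionKey = MD5(Second128Bits(SessionKeyBlob) + ClientRandom + ServerRandom) */
	security_licensing_encryption_key(session_key_blob, client_random, server_random, license_key);
}
#endif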
void security_uint32_le(uint8* output, uint32 value)
{
output[0] = (value) & 0xFF;
output[1] = (value >> 8) & 0xFF;
output[2] = (value >> 16) & 0xFF;
output[3] = (value >> 24) & 0xFF;
}
void security_mac_data(uint8* mac_salt_key, uint8* data, uint32 length, uint8* output)
{
CryptoMd5 md5;
CryptoSha1 sha1;
uint8 length_le[4];
uint8 sha1_digest[CRYPTO_SHA1_DIGEST_LENGTH];
/* MacData = MD5(MacSaltKey + pad2 + SHA1(MacSaltKey + pad1 + length + data)) */
security_uint32_le(length_le, length); /* length must be little-endian */
/* SHA1_Digest = SHA1(MacSaltKey + pad1 + length + data) */
sha1 = crypto_sha1_init();
crypto_sha1_update(sha1, mac_salt_key, 16); /* MacSaltKey */
crypto_sha1_update(sha1, pad1, sizeof(pad1)); /* pad1 */
crypto_sha1_update(sha1, length_le, sizeof(length_le)); /* length */
crypto_sha1_update(sha1, data, length); /* data */
crypto_sha1_final(sha1, sha1_digest);
/* MacData = MD5(MacSaltKey + pad2 + SHA1_Digest) */
md5 = crypto_md5_init();
crypto_md5_update(md5, mac_salt_key, 16); /* MacSaltKey */
crypto_md5_update(md5, pad2, sizeof(pad2)); /* pad2 */
crypto_md5_update(md5, sha1_digest, sizeof(sha1_digest)); /* SHA1_Digest */
crypto_md5_final(md5, output);
}
void security_mac_signature(rdpRdp *rdp, uint8* data, uint32 length, uint8* output)
{
CryptoMd5 md5;
CryptoSha1 sha1;
uint8 length_le[4];
uint8 md5_digest[CRYPTO_MD5_DIGEST_LENGTH];
uint8 sha1_digest[CRYPTO_SHA1_DIGEST_LENGTH];
security_uint32_le(length_le, length); /* length must be little-endian */
/* SHA1_Digest = SHA1(MACKeyN + pad1 + length + data) */
sha1 = crypto_sha1_init();
crypto_sha1_update(sha1, rdp->sign_key, rdp->rc4_key_len); /* MacKeyN */
crypto_sha1_update(sha1, pad1, sizeof(pad1)); /* pad1 */
crypto_sha1_update(sha1, length_le, sizeof(length_le)); /* length */
crypto_sha1_update(sha1, data, length); /* data */
crypto_sha1_final(sha1, sha1_digest);
/* MACSignature = First64Bits(MD5(MACKeyN + pad2 + SHA1_Digest)) */
md5 = crypto_md5_init();
crypto_md5_update(md5, rdp->sign_key, rdp->rc4_key_len); /* MacKeyN */
crypto_md5_update(md5, pad2, sizeof(pad2)); /* pad2 */
crypto_md5_update(md5, sha1_digest, sizeof(sha1_digest)); /* SHA1_Digest */
crypto_md5_final(md5, md5_digest);
memcpy(output, md5_digest, 8);
}
void security_salted_mac_signature(rdpRdp *rdp, uint8* data, uint32 length, boolean encryption, uint8* output)
{
CryptoMd5 md5;
CryptoSha1 sha1;
uint8 length_le[4];
uint8 use_count_le[4];
uint8 md5_digest[CRYPTO_MD5_DIGEST_LENGTH];
uint8 sha1_digest[CRYPTO_SHA1_DIGEST_LENGTH];
security_uint32_le(length_le, length); /* length must be little-endian */
if (encryption)
{
security_uint32_le(use_count_le, rdp->encrypt_checksum_use_count);
}
else
{
		/*
		 * We calculate the checksum on the plain text, so the data must
		 * already have been decrypted, which means decrypt_checksum_use_count
		 * is off by one.
		 */
security_uint32_le(use_count_le, rdp->decrypt_checksum_use_count - 1);
}
/* SHA1_Digest = SHA1(MACKeyN + pad1 + length + data) */
sha1 = crypto_sha1_init();
crypto_sha1_update(sha1, rdp->sign_key, rdp->rc4_key_len); /* MacKeyN */
crypto_sha1_update(sha1, pad1, sizeof(pad1)); /* pad1 */
crypto_sha1_update(sha1, length_le, sizeof(length_le)); /* length */
crypto_sha1_update(sha1, data, length); /* data */
crypto_sha1_update(sha1, use_count_le, sizeof(use_count_le)); /* encryptionCount */
crypto_sha1_final(sha1, sha1_digest);
/* MACSignature = First64Bits(MD5(MACKeyN + pad2 + SHA1_Digest)) */
md5 = crypto_md5_init();
crypto_md5_update(md5, rdp->sign_key, rdp->rc4_key_len); /* MacKeyN */
crypto_md5_update(md5, pad2, sizeof(pad2)); /* pad2 */
crypto_md5_update(md5, sha1_digest, sizeof(sha1_digest)); /* SHA1_Digest */
crypto_md5_final(md5, md5_digest);
memcpy(output, md5_digest, 8);
}
static void security_A(uint8* master_secret, uint8* client_random, uint8* server_random, uint8* output)
{
	/* MasterSecret = PreMasterHash('A') + PreMasterHash('BB') + PreMasterHash('CCC') */
	/* (despite its name, the first argument is the premaster secret) */
	security_premaster_hash("A", 1, master_secret, client_random, server_random, &output[0]);
	security_premaster_hash("BB", 2, master_secret, client_random, server_random, &output[16]);
	security_premaster_hash("CCC", 3, master_secret, client_random, server_random, &output[32]);
}
static void security_X(uint8* master_secret, uint8* client_random, uint8* server_random, uint8* output)
{
	/* SessionKeyBlob = MasterHash('X') + MasterHash('YY') + MasterHash('ZZZ') */
	security_premaster_hash("X", 1, master_secret, client_random, server_random, &output[0]);
	security_premaster_hash("YY", 2, master_secret, client_random, server_random, &output[16]);
	security_premaster_hash("ZZZ", 3, master_secret, client_random, server_random, &output[32]);
}
static void fips_expand_key_bits(uint8* in, uint8* out)
{
uint8 buf[21], c;
int i, b, p, r;
/* reverse every byte in the key */
for (i = 0; i < 21; i++)
buf[i] = fips_reverse_table[in[i]];
/* insert a zero-bit after every 7th bit */
for (i = 0, b = 0; i < 24; i++, b += 7)
{
p = b / 8;
r = b % 8;
if (r == 0)
{
out[i] = buf[p] & 0xfe;
}
else
{
/* c is accumulator */
c = buf[p] << r;
c |= buf[p + 1] >> (8 - r);
out[i] = c & 0xfe;
}
}
/* reverse every byte */
/* alter lsb so the byte has odd parity */
for (i = 0; i < 24; i++)
out[i] = fips_oddparity_table[fips_reverse_table[out[i]]];
}
boolean security_establish_keys(uint8* client_random, rdpRdp* rdp)
{
uint8 pre_master_secret[48];
uint8 master_secret[48];
uint8 session_key_blob[48];
uint8* server_random;
uint8 salt40[] = { 0xD1, 0x26, 0x9E };
rdpSettings* settings;
settings = rdp->settings;
server_random = settings->server_random->data;
if (settings->encryption_method == ENCRYPTION_METHOD_FIPS)
{
CryptoSha1 sha1;
uint8 client_encrypt_key_t[CRYPTO_SHA1_DIGEST_LENGTH + 1];
uint8 client_decrypt_key_t[CRYPTO_SHA1_DIGEST_LENGTH + 1];
printf("FIPS Compliant encryption level.\n");
		/* disable fastpath input; it doesn't handle FIPS encryption yet */
rdp->settings->fastpath_input = false;
sha1 = crypto_sha1_init();
crypto_sha1_update(sha1, client_random + 16, 16);
crypto_sha1_update(sha1, server_random + 16, 16);
crypto_sha1_final(sha1, client_encrypt_key_t);
client_encrypt_key_t[20] = client_encrypt_key_t[0];
fips_expand_key_bits(client_encrypt_key_t, rdp->fips_encrypt_key);
sha1 = crypto_sha1_init();
crypto_sha1_update(sha1, client_random, 16);
crypto_sha1_update(sha1, server_random, 16);
crypto_sha1_final(sha1, client_decrypt_key_t);
client_decrypt_key_t[20] = client_decrypt_key_t[0];
fips_expand_key_bits(client_decrypt_key_t, rdp->fips_decrypt_key);
sha1 = crypto_sha1_init();
crypto_sha1_update(sha1, client_decrypt_key_t, 20);
crypto_sha1_update(sha1, client_encrypt_key_t, 20);
crypto_sha1_final(sha1, rdp->fips_sign_key);
}
memcpy(pre_master_secret, client_random, 24);
memcpy(pre_master_secret + 24, server_random, 24);
security_A(pre_master_secret, client_random, server_random, master_secret);
security_X(master_secret, client_random, server_random, session_key_blob);
memcpy(rdp->sign_key, session_key_blob, 16);
if (rdp->settings->server_mode) {
security_md5_16_32_32(&session_key_blob[16], client_random,
server_random, rdp->encrypt_key);
security_md5_16_32_32(&session_key_blob[32], client_random,
server_random, rdp->decrypt_key);
} else {
security_md5_16_32_32(&session_key_blob[16], client_random,
server_random, rdp->decrypt_key);
security_md5_16_32_32(&session_key_blob[32], client_random,
server_random, rdp->encrypt_key);
}
	if (settings->encryption_method == ENCRYPTION_METHOD_40BIT) /* 40 and 56 bit */
{
memcpy(rdp->sign_key, salt40, 3); /* TODO 56 bit */
memcpy(rdp->decrypt_key, salt40, 3); /* TODO 56 bit */
memcpy(rdp->encrypt_key, salt40, 3); /* TODO 56 bit */
rdp->rc4_key_len = 8;
}
	else if (settings->encryption_method == ENCRYPTION_METHOD_128BIT) /* 128 bit */
{
rdp->rc4_key_len = 16;
}
memcpy(rdp->decrypt_update_key, rdp->decrypt_key, 16);
memcpy(rdp->encrypt_update_key, rdp->encrypt_key, 16);
rdp->decrypt_use_count = 0;
rdp->decrypt_checksum_use_count = 0;
	rdp->encrypt_use_count = 0;
	rdp->encrypt_checksum_use_count = 0;
return true;
}
boolean security_key_update(uint8* key, uint8* update_key, int key_len)
{
uint8 sha1h[CRYPTO_SHA1_DIGEST_LENGTH];
CryptoMd5 md5;
CryptoSha1 sha1;
CryptoRc4 rc4;
uint8 salt40[] = { 0xD1, 0x26, 0x9E };
sha1 = crypto_sha1_init();
crypto_sha1_update(sha1, update_key, key_len);
crypto_sha1_update(sha1, pad1, sizeof(pad1));
crypto_sha1_update(sha1, key, key_len);
crypto_sha1_final(sha1, sha1h);
md5 = crypto_md5_init();
crypto_md5_update(md5, update_key, key_len);
crypto_md5_update(md5, pad2, sizeof(pad2));
crypto_md5_update(md5, sha1h, sizeof(sha1h));
crypto_md5_final(md5, key);
rc4 = crypto_rc4_init(key, key_len);
crypto_rc4(rc4, key_len, key, key);
crypto_rc4_free(rc4);
if (key_len == 8)
memcpy(key, salt40, 3); /* TODO 56 bit */
return true;
}
boolean security_encrypt(uint8* data, int length, rdpRdp* rdp)
{
if (rdp->encrypt_use_count >= 4096)
{
security_key_update(rdp->encrypt_key, rdp->encrypt_update_key, rdp->rc4_key_len);
crypto_rc4_free(rdp->rc4_encrypt_key);
rdp->rc4_encrypt_key = crypto_rc4_init(rdp->encrypt_key, rdp->rc4_key_len);
rdp->encrypt_use_count = 0;
}
crypto_rc4(rdp->rc4_encrypt_key, length, data, data);
rdp->encrypt_use_count++;
rdp->encrypt_checksum_use_count++;
return true;
}
boolean security_decrypt(uint8* data, int length, rdpRdp* rdp)
{
if (rdp->decrypt_key == NULL)
return false;
if (rdp->decrypt_use_count >= 4096)
{
security_key_update(rdp->decrypt_key, rdp->decrypt_update_key, rdp->rc4_key_len);
crypto_rc4_free(rdp->rc4_decrypt_key);
rdp->rc4_decrypt_key = crypto_rc4_init(rdp->decrypt_key, rdp->rc4_key_len);
rdp->decrypt_use_count = 0;
}
crypto_rc4(rdp->rc4_decrypt_key, length, data, data);
rdp->decrypt_use_count += 1;
rdp->decrypt_checksum_use_count++;
return true;
}
void security_hmac_signature(uint8* data, int length, uint8* output, rdpRdp* rdp)
{
uint8 buf[20];
uint8 use_count_le[4];
security_uint32_le(use_count_le, rdp->encrypt_use_count);
crypto_hmac_sha1_init(rdp->fips_hmac, rdp->fips_sign_key, 20);
crypto_hmac_update(rdp->fips_hmac, data, length);
crypto_hmac_update(rdp->fips_hmac, use_count_le, 4);
crypto_hmac_final(rdp->fips_hmac, buf, 20);
memmove(output, buf, 8);
}
boolean security_fips_encrypt(uint8* data, int length, rdpRdp* rdp)
{
crypto_des3_encrypt(rdp->fips_encrypt, length, data, data);
rdp->encrypt_use_count++;
return true;
}
boolean security_fips_decrypt(uint8* data, int length, rdpRdp* rdp)
{
crypto_des3_decrypt(rdp->fips_decrypt, length, data, data);
return true;
}
boolean security_fips_check_signature(uint8* data, int length, uint8* sig, rdpRdp* rdp)
{
uint8 buf[20];
uint8 use_count_le[4];
security_uint32_le(use_count_le, rdp->decrypt_use_count);
crypto_hmac_sha1_init(rdp->fips_hmac, rdp->fips_sign_key, 20);
crypto_hmac_update(rdp->fips_hmac, data, length);
crypto_hmac_update(rdp->fips_hmac, use_count_le, 4);
crypto_hmac_final(rdp->fips_hmac, buf, 20);
rdp->decrypt_use_count++;
if (memcmp(sig, buf, 8))
return false;
return true;
}
| mister-r/FreeRDP | libfreerdp-core/security.c | C | apache-2.0 | 18,889 |
./clean.sh
# Compile all modules in one pass using the module source path.
javac -d build/modules \
    --module-source-path src \
    `find src -name "*.java"`

# Package each module as a modular JAR, recording a module version;
# the service and app JARs also record their main class.
jar --create --file=mlib/[email protected] \
    --module-version=1.0 -C build/modules/com.acme.bids.db .

jar --create --file=mlib/[email protected] \
    --module-version=1.0 \
    --main-class=com.acme.bids.service.api.UserService \
    -C build/modules/com.acme.bids.service .

jar --create --file=mlib/[email protected] \
    --module-version=1.0 \
    --main-class=com.acme.bids.app.App \
    -C build/modules/com.acme.bids.app .
| codetojoy/talk_maritimedevcon_java_9_modules | eg_05_java_9_jlink/compile.sh | Shell | apache-2.0 | 531 |
/**
* Copyright 2020 LinkedIn Corp. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*/
package com.github.ambry.accountstats;
import com.codahale.metrics.MetricRegistry;
import com.github.ambry.config.AccountStatsMySqlConfig;
import com.github.ambry.config.ClusterMapConfig;
import com.github.ambry.config.VerifiableProperties;
import com.github.ambry.server.HostAccountStorageStatsWrapper;
import com.github.ambry.server.HostPartitionClassStorageStatsWrapper;
import com.github.ambry.server.StatsHeader;
import com.github.ambry.server.StatsReportType;
import com.github.ambry.server.StatsSnapshot;
import com.github.ambry.server.StatsWrapper;
import com.github.ambry.server.StorageStatsUtil;
import com.github.ambry.server.StorageStatsUtilTest;
import com.github.ambry.server.storagestats.AggregatedAccountStorageStats;
import com.github.ambry.server.storagestats.AggregatedPartitionClassStorageStats;
import com.github.ambry.server.storagestats.ContainerStorageStats;
import com.github.ambry.server.storagestats.HostAccountStorageStats;
import com.github.ambry.server.storagestats.HostPartitionClassStorageStats;
import com.github.ambry.utils.Pair;
import com.github.ambry.utils.TestUtils;
import com.github.ambry.utils.Utils;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Statement;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Random;
import java.util.Set;
import java.util.stream.Collectors;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.junit.runners.Parameterized;
import static org.junit.Assert.*;
/**
* Integration tests for {@link AccountStatsMySqlStore}.
*/
@RunWith(Parameterized.class)
public class AccountStatsMySqlStoreIntegrationTest {
private static final String clusterName1 = "Ambry-test";
private static final String clusterName2 = "Ambry-random";
// hostname1 and hostname2 are the same, but with different port numbers
private static final String hostname1 = "ambry1.test.github.com";
private static final String hostname2 = "ambry1.test.github.com";
private static final String hostname3 = "ambry3.test.github.com";
private static final int port1 = 12345;
private static final int port2 = 12346;
private static final int port3 = 12347;
private final int batchSize;
private final AccountStatsMySqlStore mySqlStore;
@Parameterized.Parameters
public static List<Object[]> data() {
return Arrays.asList(new Object[][]{{0}, {17}});
}
public AccountStatsMySqlStoreIntegrationTest(int batchSize) throws Exception {
this.batchSize = batchSize;
mySqlStore = createAccountStatsMySqlStore(clusterName1, hostname1, port1);
}
@Before
public void before() throws Exception {
mySqlStore.cleanupTables();
}
@After
public void after() {
mySqlStore.shutdown();
}
/**
* Tests to store multiple stats for multiple hosts and recover stats from database.
* @throws Exception
*/
@Test
public void testMultiStoreStats() throws Exception {
AccountStatsMySqlStore mySqlStore1 = createAccountStatsMySqlStore(clusterName1, hostname1, port1);
AccountStatsMySqlStore mySqlStore2 = createAccountStatsMySqlStore(clusterName1, hostname2, port2);
AccountStatsMySqlStore mySqlStore3 = createAccountStatsMySqlStore(clusterName2, hostname3, port3);
// Generating StatsWrappers, store StatsWrappers and retrieve StatsWrappers
StatsWrapper stats1 = generateStatsWrapper(10, 10, 1, StatsReportType.ACCOUNT_REPORT);
StatsWrapper stats2 = generateStatsWrapper(10, 10, 1, StatsReportType.ACCOUNT_REPORT);
StatsWrapper stats3 = generateStatsWrapper(10, 10, 1, StatsReportType.ACCOUNT_REPORT);
mySqlStore1.storeAccountStats(stats1);
mySqlStore2.storeAccountStats(stats2);
mySqlStore3.storeAccountStats(stats3);
assertTableSize(mySqlStore1, 3 * 10 * 10);
StatsWrapper obtainedStats1 = mySqlStore1.queryAccountStatsByHost(hostname1, port1);
StatsWrapper obtainedStats2 = mySqlStore2.queryAccountStatsByHost(hostname2, port2);
StatsWrapper obtainedStats3 = mySqlStore3.queryAccountStatsByHost(hostname3, port3);
assertTwoStatsSnapshots(obtainedStats1.getSnapshot(), stats1.getSnapshot());
assertTwoStatsSnapshots(obtainedStats2.getSnapshot(), stats2.getSnapshot());
assertTwoStatsSnapshots(obtainedStats3.getSnapshot(), stats3.getSnapshot());
// Generating HostAccountStorageStatsWrappers, store and retrieve them
HostAccountStorageStatsWrapper hostStats1 =
generateHostAccountStorageStatsWrapper(10, 10, 1, StatsReportType.ACCOUNT_REPORT);
HostAccountStorageStatsWrapper hostStats2 =
generateHostAccountStorageStatsWrapper(10, 10, 1, StatsReportType.ACCOUNT_REPORT);
HostAccountStorageStatsWrapper hostStats3 =
generateHostAccountStorageStatsWrapper(10, 10, 1, StatsReportType.ACCOUNT_REPORT);
mySqlStore1.storeHostAccountStorageStats(hostStats1);
mySqlStore2.storeHostAccountStorageStats(hostStats2);
mySqlStore3.storeHostAccountStorageStats(hostStats3);
HostAccountStorageStatsWrapper obtainedHostStats1 =
mySqlStore1.queryHostAccountStorageStatsByHost(hostname1, port1);
HostAccountStorageStatsWrapper obtainedHostStats2 =
mySqlStore2.queryHostAccountStorageStatsByHost(hostname2, port2);
HostAccountStorageStatsWrapper obtainedHostStats3 =
mySqlStore3.queryHostAccountStorageStatsByHost(hostname3, port3);
assertEquals(hostStats1.getStats().getStorageStats(), obtainedHostStats1.getStats().getStorageStats());
assertEquals(hostStats2.getStats().getStorageStats(), obtainedHostStats2.getStats().getStorageStats());
assertEquals(hostStats3.getStats().getStorageStats(), obtainedHostStats3.getStats().getStorageStats());
// Retrieve StatWrappers
obtainedStats1 = mySqlStore1.queryAccountStatsByHost(hostname1, port1);
StatsSnapshot converted =
StorageStatsUtil.convertHostAccountStorageStatsToStatsSnapshot(hostStats1.getStats(), false);
assertTwoStatsSnapshots(converted, obtainedStats1.getSnapshot());
mySqlStore1.shutdown();
mySqlStore2.shutdown();
mySqlStore3.shutdown();
}
@Test
public void testEmptyStatsWhenReadingPreviousStatsFromMysqlDb() throws Exception {
//write a new stats into database.
AccountStatsMySqlStore mySqlStore = createAccountStatsMySqlStore(clusterName1, hostname1, port1);
HostAccountStorageStatsWrapper stats =
generateHostAccountStorageStatsWrapper(1, 1, 1, StatsReportType.ACCOUNT_REPORT);
mySqlStore.storeHostAccountStorageStats(stats);
HostAccountStorageStatsWrapper obtainedStats = mySqlStore.queryHostAccountStorageStatsByHost(hostname1, port1);
assertTrue(obtainedStats.getStats().getStorageStats().containsKey((long) 0));
    // Reinitialize the mySqlStore and write new stats with the same partition.
mySqlStore = createAccountStatsMySqlStore(clusterName1, hostname1, port1);
assertTrue(
mySqlStore.getPreviousHostAccountStorageStatsWrapper().getStats().getStorageStats().containsKey((long) 0));
HostAccountStorageStatsWrapper stats2 =
generateHostAccountStorageStatsWrapper(0, 0, 0, StatsReportType.ACCOUNT_REPORT);
Map<Long, Map<Short, Map<Short, ContainerStorageStats>>> newStorageStats =
new HashMap<>(stats2.getStats().getStorageStats());
newStorageStats.put((long) 0,
        new HashMap<>()); // Remove partition 0's storage stats data; this removes the entire partition from the database
mySqlStore.storeHostAccountStorageStats(
new HostAccountStorageStatsWrapper(stats2.getHeader(), new HostAccountStorageStats(newStorageStats)));
// empty stats should remove all the data in the database
obtainedStats = mySqlStore.queryHostAccountStorageStatsByHost(hostname1, port1);
assertFalse(obtainedStats.getStats().getStorageStats().containsKey((long) 0));
}
@Test
public void testEmptyStatsWhenReadingPreviousStatsFromLocalBackUpFile() throws Exception {
AccountStatsMySqlStore mySqlStore = createAccountStatsMySqlStore(clusterName1, hostname1, port1);
HostAccountStorageStatsWrapper stats =
generateHostAccountStorageStatsWrapper(10, 10, 1, StatsReportType.ACCOUNT_REPORT);
Map<Long, Map<Short, Map<Short, ContainerStorageStats>>> newStorageStats =
new HashMap<>(stats.getStats().getStorageStats());
newStorageStats.put((long) 10, new HashMap<>());
mySqlStore.storeHostAccountStorageStats(
new HostAccountStorageStatsWrapper(stats.getHeader(), new HostAccountStorageStats(newStorageStats)));
HostAccountStorageStatsWrapper obtainedStats = mySqlStore.queryHostAccountStorageStatsByHost(hostname1, port1);
assertFalse(obtainedStats.getStats().getStorageStats().containsKey((long) 10));
// Write a new stats with partition 10 still empty
HostAccountStorageStatsWrapper stats2 =
generateHostAccountStorageStatsWrapper(10, 10, 1, StatsReportType.ACCOUNT_REPORT);
newStorageStats = new HashMap<>(stats.getStats().getStorageStats());
newStorageStats.put((long) 10, new HashMap<>());
mySqlStore.storeHostAccountStorageStats(
new HostAccountStorageStatsWrapper(stats2.getHeader(), new HostAccountStorageStats(newStorageStats)));
HostAccountStorageStatsWrapper obtainedStats2 = mySqlStore.queryHostAccountStorageStatsByHost(hostname1, port1);
assertFalse(obtainedStats2.getStats().getStorageStats().containsKey((long) 10));
// Write a new stats with partition 10 not empty
HostAccountStorageStatsWrapper stats3 =
generateHostAccountStorageStatsWrapper(10, 10, 1, StatsReportType.ACCOUNT_REPORT);
newStorageStats = new HashMap<>(stats.getStats().getStorageStats());
newStorageStats.put((long) 10, stats.getStats().getStorageStats().get((long) 1));
mySqlStore.storeHostAccountStorageStats(
new HostAccountStorageStatsWrapper(stats3.getHeader(), new HostAccountStorageStats(newStorageStats)));
HostAccountStorageStatsWrapper obtainedStats3 = mySqlStore.queryHostAccountStorageStatsByHost(hostname1, port1);
assertTrue(obtainedStats3.getStats().getStorageStats().containsKey((long) 10));
// Write an empty HostAccountStorageStats
mySqlStore.storeHostAccountStorageStats(
new HostAccountStorageStatsWrapper(stats3.getHeader(), new HostAccountStorageStats()));
// Empty storage stats should remove all the data in the database
HostAccountStorageStatsWrapper obtainedStats4 = mySqlStore.queryHostAccountStorageStatsByHost(hostname1, port1);
assertTrue(obtainedStats4.getStats().getStorageStats().isEmpty());
// Write an empty HostAccountStorageStats again
mySqlStore.storeHostAccountStorageStats(
new HostAccountStorageStatsWrapper(stats3.getHeader(), new HostAccountStorageStats()));
HostAccountStorageStatsWrapper obtainedStats5 = mySqlStore.queryHostAccountStorageStatsByHost(hostname1, port1);
assertTrue(obtainedStats5.getStats().getStorageStats().isEmpty());
HostAccountStorageStatsWrapper stats6 =
generateHostAccountStorageStatsWrapper(20, 20, 20, StatsReportType.ACCOUNT_REPORT);
mySqlStore.storeHostAccountStorageStats(stats6);
HostAccountStorageStatsWrapper obtainedStats6 = mySqlStore.queryHostAccountStorageStatsByHost(hostname1, port1);
assertEquals(obtainedStats6.getStats().getStorageStats(), stats6.getStats().getStorageStats());
mySqlStore.shutdown();
}
/**
* Test to delete partition, account and container data from database
* @throws Exception
*/
@Test
public void testStatsDeletePartitionAccountContainer() throws Exception {
AccountStatsMySqlStore mySqlStore = createAccountStatsMySqlStore(clusterName1, hostname1, port1);
HostAccountStorageStatsWrapper stats =
generateHostAccountStorageStatsWrapper(10, 10, 10, StatsReportType.ACCOUNT_REPORT);
mySqlStore.storeHostAccountStorageStats(stats);
// Now remove one partition from stats
HostAccountStorageStats storageStatsCopy = new HostAccountStorageStats(stats.getStats());
Map<Long, Map<Short, Map<Short, ContainerStorageStats>>> newStorageStatsMap =
new HashMap<>(storageStatsCopy.getStorageStats());
newStorageStatsMap.remove((long) 1);
HostAccountStorageStatsWrapper stats2 = new HostAccountStorageStatsWrapper(new StatsHeader(stats.getHeader()),
new HostAccountStorageStats(newStorageStatsMap));
mySqlStore.storeHostAccountStorageStats(stats2);
HostAccountStorageStatsWrapper obtainedStats2 = mySqlStore.queryHostAccountStorageStatsByHost(hostname1, port1);
assertEquals(obtainedStats2.getStats().getStorageStats(), stats2.getStats().getStorageStats());
// Now remove one account from stats
storageStatsCopy = new HostAccountStorageStats(stats2.getStats());
newStorageStatsMap = new HashMap<>(storageStatsCopy.getStorageStats());
newStorageStatsMap.get((long) 3).remove((short) 1);
HostAccountStorageStatsWrapper stats3 = new HostAccountStorageStatsWrapper(new StatsHeader(stats2.getHeader()),
new HostAccountStorageStats(newStorageStatsMap));
mySqlStore.storeHostAccountStorageStats(stats3);
HostAccountStorageStatsWrapper obtainedStats3 = mySqlStore.queryHostAccountStorageStatsByHost(hostname1, port1);
assertEquals(obtainedStats3.getStats().getStorageStats(), stats3.getStats().getStorageStats());
// Now remove some containers
storageStatsCopy = new HostAccountStorageStats(stats3.getStats());
newStorageStatsMap = new HashMap<>(storageStatsCopy.getStorageStats());
for (short containerId : new short[]{0, 1, 2}) {
newStorageStatsMap.get((long) 3).get((short) 3).remove(containerId);
}
HostAccountStorageStatsWrapper stats4 = new HostAccountStorageStatsWrapper(new StatsHeader(stats3.getHeader()),
new HostAccountStorageStats(newStorageStatsMap));
mySqlStore.storeHostAccountStorageStats(stats4);
HostAccountStorageStatsWrapper obtainedStats4 = mySqlStore.queryHostAccountStorageStatsByHost(hostname1, port1);
assertEquals(obtainedStats4.getStats().getStorageStats(), stats4.getStats().getStorageStats());
// Now write the stats back
stats = generateHostAccountStorageStatsWrapper(10, 10, 10, StatsReportType.ACCOUNT_REPORT);
mySqlStore.storeHostAccountStorageStats(stats);
HostAccountStorageStatsWrapper obtainedStats = mySqlStore.queryHostAccountStorageStatsByHost(hostname1, port1);
assertEquals(stats.getStats().getStorageStats(), obtainedStats.getStats().getStorageStats());
mySqlStore.shutdown();
}
/**
   * Tests to store multiple stats for one host and recover the stats from the database.
* @throws Exception
*/
@Test
  public void testStoreMultipleWrites() throws Exception {
AccountStatsMySqlStore mySqlStore = createAccountStatsMySqlStore(clusterName1, hostname1, port1);
HostAccountStorageStatsWrapper stats1 =
generateHostAccountStorageStatsWrapper(10, 10, 1, StatsReportType.ACCOUNT_REPORT);
mySqlStore.storeHostAccountStorageStats(stats1);
HostAccountStorageStats hostAccountStorageStatsCopy = new HostAccountStorageStats(stats1.getStats());
Map<Long, Map<Short, Map<Short, ContainerStorageStats>>> newStorageStats =
new HashMap<>(hostAccountStorageStatsCopy.getStorageStats());
ContainerStorageStats origin = newStorageStats.get((long) 0).get((short) 0).get((short) 0);
newStorageStats.get((long) 0)
.get((short) 0)
.put((short) 0,
new ContainerStorageStats.Builder(origin).logicalStorageUsage(origin.getLogicalStorageUsage() + 1).build());
HostAccountStorageStatsWrapper stats2 = new HostAccountStorageStatsWrapper(new StatsHeader(stats1.getHeader()),
new HostAccountStorageStats(newStorageStats));
mySqlStore.storeHostAccountStorageStats(stats2);
HostAccountStorageStatsWrapper obtainedStats = mySqlStore.queryHostAccountStorageStatsByHost(hostname1, port1);
assertEquals(stats2.getStats().getStorageStats(), obtainedStats.getStats().getStorageStats());
hostAccountStorageStatsCopy = new HostAccountStorageStats(stats1.getStats());
newStorageStats = new HashMap<>(hostAccountStorageStatsCopy.getStorageStats());
origin = newStorageStats.get((long) 0).get((short) 0).get((short) 0);
newStorageStats.get((long) 0)
.get((short) 0)
.put((short) 0,
new ContainerStorageStats.Builder(origin).physicalStorageUsage(origin.getPhysicalStorageUsage() + 1)
.build());
HostAccountStorageStatsWrapper stats3 = new HostAccountStorageStatsWrapper(new StatsHeader(stats1.getHeader()),
new HostAccountStorageStats(newStorageStats));
mySqlStore.storeHostAccountStorageStats(stats3);
obtainedStats = mySqlStore.queryHostAccountStorageStatsByHost(hostname1, port1);
assertEquals(stats3.getStats().getStorageStats(), obtainedStats.getStats().getStorageStats());
hostAccountStorageStatsCopy = new HostAccountStorageStats(stats1.getStats());
newStorageStats = new HashMap<>(hostAccountStorageStatsCopy.getStorageStats());
origin = newStorageStats.get((long) 0).get((short) 0).get((short) 0);
newStorageStats.get((long) 0)
.get((short) 0)
.put((short) 0, new ContainerStorageStats.Builder(origin).numberOfBlobs(origin.getNumberOfBlobs() + 1).build());
HostAccountStorageStatsWrapper stats4 = new HostAccountStorageStatsWrapper(new StatsHeader(stats1.getHeader()),
new HostAccountStorageStats(newStorageStats));
mySqlStore.storeHostAccountStorageStats(stats4);
obtainedStats = mySqlStore.queryHostAccountStorageStatsByHost(hostname1, port1);
assertEquals(stats4.getStats().getStorageStats(), obtainedStats.getStats().getStorageStats());
mySqlStore.shutdown();
}
/**
* Test the methods for storing, deleting and fetch aggregated account stats.
* @throws Exception
*/
@Test
public void testAggregatedAccountStats() throws Exception {
Map<String, Map<String, Long>> containerStorageUsages = TestUtils.makeStorageMap(10, 10, 100000, 1000);
StatsSnapshot snapshot = TestUtils.makeAccountStatsSnapshotFromContainerStorageMap(containerStorageUsages);
mySqlStore.storeAggregatedAccountStats(snapshot);
Map<String, Map<String, Long>> obtainedContainerStorageUsages = mySqlStore.queryAggregatedAccountStats(false);
assertEquals(containerStorageUsages, obtainedContainerStorageUsages);
StatsSnapshot obtainedSnapshot = mySqlStore.queryAggregatedAccountStatsByClusterName(clusterName1);
assertEquals(snapshot, obtainedSnapshot);
    // Fetching aggregated account stats for clusterName2 should result in empty stats
assertEquals(mySqlStore.queryAggregatedAccountStatsByClusterName(clusterName2).getSubMap().size(), 0);
// Change one value and store it to mysql database again
StatsSnapshot newSnapshot = new StatsSnapshot(snapshot);
newSnapshot.getSubMap()
.get(Utils.statsAccountKey((short) 1))
.getSubMap()
.get(Utils.statsContainerKey((short) 1))
.setValue(1);
newSnapshot.updateValue();
containerStorageUsages.get("1").put("1", 1L);
mySqlStore.storeAggregatedAccountStats(newSnapshot);
obtainedContainerStorageUsages = mySqlStore.queryAggregatedAccountStats(false);
assertEquals(containerStorageUsages, obtainedContainerStorageUsages);
// Delete account and container
newSnapshot = new StatsSnapshot(newSnapshot);
newSnapshot.getSubMap().remove(Utils.statsAccountKey((short) 1));
newSnapshot.getSubMap()
.get(Utils.statsAccountKey((short) 2))
.getSubMap()
.remove(Utils.statsContainerKey((short) 1));
newSnapshot.updateValue();
// Now remove all containers for account 1 and container 1 of account 2
for (String containerId : containerStorageUsages.get(String.valueOf(1)).keySet()) {
mySqlStore.deleteAggregatedAccountStatsForContainer((short) 1, Short.valueOf(containerId));
}
mySqlStore.deleteAggregatedAccountStatsForContainer((short) 2, (short) 1);
obtainedSnapshot = mySqlStore.queryAggregatedAccountStatsByClusterName(clusterName1);
assertEquals(newSnapshot, obtainedSnapshot);
mySqlStore.shutdown();
}
/**
* Test the methods for storing, deleting and fetch aggregated account storage stats.
* @throws Exception
*/
@Test
public void testAggregatedAccountStorageStats() throws Exception {
AggregatedAccountStorageStats aggregatedAccountStorageStats = new AggregatedAccountStorageStats(
StorageStatsUtilTest.generateRandomAggregatedAccountStorageStats((short) 0, 10, 10, 10000L, 2, 10));
mySqlStore.storeAggregatedAccountStorageStats(aggregatedAccountStorageStats);
// Compare container usage map
Map<String, Map<String, Long>> obtainedContainerStorageUsages = mySqlStore.queryAggregatedAccountStats(false);
assertEquals(StorageStatsUtil.convertAggregatedAccountStorageStatsToMap(aggregatedAccountStorageStats, false),
obtainedContainerStorageUsages);
// Compare StatsSnapshot
StatsSnapshot obtainedSnapshot = mySqlStore.queryAggregatedAccountStatsByClusterName(clusterName1);
assertEquals(
StorageStatsUtil.convertAggregatedAccountStorageStatsToStatsSnapshot(aggregatedAccountStorageStats, false),
obtainedSnapshot);
// Compare AggregatedAccountStorageStats
AggregatedAccountStorageStats obtainedStats = mySqlStore.queryAggregatedAccountStorageStats();
assertEquals(aggregatedAccountStorageStats.getStorageStats(), obtainedStats.getStorageStats());
obtainedStats = mySqlStore.queryAggregatedAccountStorageStatsByClusterName(clusterName1);
assertEquals(aggregatedAccountStorageStats.getStorageStats(), obtainedStats.getStorageStats());
    // Fetching aggregated account stats for clusterName2 should result in empty stats
assertEquals(mySqlStore.queryAggregatedAccountStatsByClusterName(clusterName2).getSubMap().size(), 0);
assertEquals(mySqlStore.queryAggregatedAccountStorageStatsByClusterName(clusterName2).getStorageStats().size(), 0);
// Change one value and store it to mysql database again
Map<Short, Map<Short, ContainerStorageStats>> newStorageStatsMap =
new HashMap<>(aggregatedAccountStorageStats.getStorageStats());
ContainerStorageStats origin = newStorageStatsMap.get((short) 1).get((short) 1);
newStorageStatsMap.get((short) 1)
.put((short) 1,
new ContainerStorageStats.Builder(origin).logicalStorageUsage(origin.getLogicalStorageUsage() + 1).build());
aggregatedAccountStorageStats = new AggregatedAccountStorageStats(newStorageStatsMap);
mySqlStore.storeAggregatedAccountStorageStats(aggregatedAccountStorageStats);
obtainedStats = mySqlStore.queryAggregatedAccountStorageStats();
assertEquals(newStorageStatsMap, obtainedStats.getStorageStats());
// Delete account and container
newStorageStatsMap = new HashMap<>(aggregatedAccountStorageStats.getStorageStats());
newStorageStatsMap.remove((short) 1);
newStorageStatsMap.get((short) 2).remove((short) 1);
// Now remove all containers for account 1 and container 1 of account 2
for (short containerId : aggregatedAccountStorageStats.getStorageStats().get((short) 1).keySet()) {
mySqlStore.deleteAggregatedAccountStatsForContainer((short) 1, containerId);
}
mySqlStore.deleteAggregatedAccountStatsForContainer((short) 2, (short) 1);
obtainedStats = mySqlStore.queryAggregatedAccountStorageStatsByClusterName(clusterName1);
assertEquals(newStorageStatsMap, obtainedStats.getStorageStats());
mySqlStore.shutdown();
}
/**
* Test methods to store, delete and fetch monthly aggregated stats
* @throws Exception
*/
@Test
public void testMonthlyAggregatedStats() throws Exception {
String monthValue = "2020-01";
AggregatedAccountStorageStats currentAggregatedStats = mySqlStore.queryAggregatedAccountStorageStats();
if (currentAggregatedStats.getStorageStats().size() == 0) {
AggregatedAccountStorageStats aggregatedAccountStorageStats = new AggregatedAccountStorageStats(
StorageStatsUtilTest.generateRandomAggregatedAccountStorageStats((short) 0, 10, 10, 10000L, 2, 10));
mySqlStore.storeAggregatedAccountStorageStats(aggregatedAccountStorageStats);
currentAggregatedStats = mySqlStore.queryAggregatedAccountStorageStats();
}
    // Fetch the recorded month; it should return an empty string
Assert.assertEquals("", mySqlStore.queryRecordedMonth());
mySqlStore.takeSnapshotOfAggregatedAccountStatsAndUpdateMonth(monthValue);
Map<String, Map<String, Long>> monthlyContainerStorageUsages = mySqlStore.queryMonthlyAggregatedAccountStats(false);
assertEquals(StorageStatsUtil.convertAggregatedAccountStorageStatsToMap(currentAggregatedStats, false),
monthlyContainerStorageUsages);
String obtainedMonthValue = mySqlStore.queryRecordedMonth();
assertTrue(obtainedMonthValue.equals(monthValue));
// Change the value and store it back to mysql database
monthValue = "2020-02";
currentAggregatedStats = new AggregatedAccountStorageStats(
StorageStatsUtilTest.generateRandomAggregatedAccountStorageStats((short) 0, 10, 10, 10000L, 2, 10));
mySqlStore.storeAggregatedAccountStorageStats(currentAggregatedStats);
mySqlStore.takeSnapshotOfAggregatedAccountStatsAndUpdateMonth(monthValue);
monthlyContainerStorageUsages = mySqlStore.queryMonthlyAggregatedAccountStats(false);
assertEquals(StorageStatsUtil.convertAggregatedAccountStorageStatsToMap(currentAggregatedStats, false),
monthlyContainerStorageUsages);
obtainedMonthValue = mySqlStore.queryRecordedMonth();
assertTrue(obtainedMonthValue.equals(monthValue));
// Delete the snapshots
mySqlStore.deleteSnapshotOfAggregatedAccountStats();
assertTrue(mySqlStore.queryMonthlyAggregatedAccountStats(false).isEmpty());
}
/**
   * Test methods to store and fetch partition class stats and partition name to partition id mappings.
* @throws Exception
*/
@Test
public void testHostPartitionClassStats() throws Exception {
// First write some stats to account reports
testMultiStoreStats();
StatsWrapper accountStats1 = mySqlStore.queryAccountStatsByHost(hostname1, port1);
StatsWrapper accountStats2 = mySqlStore.queryAccountStatsByHost(hostname2, port2);
AccountStatsMySqlStore mySqlStore3 = createAccountStatsMySqlStore(clusterName2, hostname3, port3);
StatsWrapper accountStats3 = mySqlStore3.queryAccountStatsByHost(hostname3, port3);
    // From these account stats, create partition class stats.
Set<String> allPartitionKeys = new HashSet<String>() {
{
addAll(accountStats1.getSnapshot().getSubMap().keySet());
addAll(accountStats2.getSnapshot().getSubMap().keySet());
addAll(accountStats3.getSnapshot().getSubMap().keySet());
}
};
List<String> partitionClassNames = Arrays.asList("default", "new");
Map<String, String> partitionKeyToClassName = new HashMap<>();
int ind = 0;
for (String partitionKey : allPartitionKeys) {
partitionKeyToClassName.put(partitionKey, partitionClassNames.get(ind % partitionClassNames.size()));
ind++;
}
StatsWrapper partitionClassStats1 =
convertAccountStatsToPartitionClassStats(accountStats1, partitionKeyToClassName);
StatsWrapper partitionClassStats2 =
convertAccountStatsToPartitionClassStats(accountStats2, partitionKeyToClassName);
StatsWrapper partitionClassStats3 =
convertAccountStatsToPartitionClassStats(accountStats3, partitionKeyToClassName);
mySqlStore.storePartitionClassStats(partitionClassStats1);
mySqlStore.storePartitionClassStats(partitionClassStats2);
mySqlStore3.storePartitionClassStats(partitionClassStats3);
Map<String, Set<Integer>> partitionNameAndIds = mySqlStore.queryPartitionNameAndIds();
assertEquals(new HashSet<>(partitionClassNames), partitionNameAndIds.keySet());
Map<String, String> dbPartitionKeyToClassName = partitionNameAndIds.entrySet()
.stream()
.flatMap(
ent -> ent.getValue().stream().map(pid -> new Pair<String, String>(ent.getKey(), "Partition[" + pid + "]")))
.collect(Collectors.toMap(Pair::getSecond, Pair::getFirst));
assertEquals(partitionKeyToClassName, dbPartitionKeyToClassName);
StatsWrapper obtainedStats1 = mySqlStore.queryPartitionClassStatsByHost(hostname1, port1, partitionNameAndIds);
assertEquals(partitionClassStats1.getSnapshot(), obtainedStats1.getSnapshot());
StatsWrapper obtainedStats2 = mySqlStore.queryPartitionClassStatsByHost(hostname2, port2, partitionNameAndIds);
assertEquals(partitionClassStats2.getSnapshot(), obtainedStats2.getSnapshot());
StatsWrapper obtainedStats3 = mySqlStore3.queryPartitionClassStatsByHost(hostname3, port3, partitionNameAndIds);
assertEquals(partitionClassStats3.getSnapshot(), obtainedStats3.getSnapshot());
mySqlStore3.shutdown();
}
/**
   * Test methods to store and fetch partition class storage stats and partition name to partition id mappings.
* @throws Exception
*/
@Test
public void testHostPartitionClassStorageStats() throws Exception {
// First write some stats to account reports
testMultiStoreStats();
HostAccountStorageStatsWrapper accountStats1 = mySqlStore.queryHostAccountStorageStatsByHost(hostname1, port1);
HostAccountStorageStatsWrapper accountStats2 = mySqlStore.queryHostAccountStorageStatsByHost(hostname2, port2);
AccountStatsMySqlStore mySqlStore3 = createAccountStatsMySqlStore(clusterName2, hostname3, port3);
HostAccountStorageStatsWrapper accountStats3 = mySqlStore3.queryHostAccountStorageStatsByHost(hostname3, port3);
    // From these account stats, create partition class storage stats.
Set<Long> allPartitionKeys = new HashSet<Long>() {
{
addAll(accountStats1.getStats().getStorageStats().keySet());
addAll(accountStats2.getStats().getStorageStats().keySet());
addAll(accountStats3.getStats().getStorageStats().keySet());
}
};
List<String> partitionClassNames = Arrays.asList("default", "new");
Map<Long, String> partitionIdToClassName = new HashMap<>();
int ind = 0;
for (long partitionId : allPartitionKeys) {
partitionIdToClassName.put(partitionId, partitionClassNames.get(ind % partitionClassNames.size()));
ind++;
}
HostPartitionClassStorageStatsWrapper partitionClassStats1 =
convertHostAccountStorageStatsToHostPartitionClassStorageStats(accountStats1, partitionIdToClassName);
HostPartitionClassStorageStatsWrapper partitionClassStats2 =
convertHostAccountStorageStatsToHostPartitionClassStorageStats(accountStats2, partitionIdToClassName);
HostPartitionClassStorageStatsWrapper partitionClassStats3 =
convertHostAccountStorageStatsToHostPartitionClassStorageStats(accountStats3, partitionIdToClassName);
mySqlStore.storeHostPartitionClassStorageStats(partitionClassStats1);
mySqlStore.storeHostPartitionClassStorageStats(partitionClassStats2);
mySqlStore3.storeHostPartitionClassStorageStats(partitionClassStats3);
Map<String, Set<Integer>> partitionNameAndIds = mySqlStore.queryPartitionNameAndIds();
assertEquals(new HashSet<>(partitionClassNames), partitionNameAndIds.keySet());
Map<Long, String> dbPartitionKeyToClassName = partitionNameAndIds.entrySet()
.stream()
.flatMap(ent -> ent.getValue().stream().map(pid -> new Pair<>(ent.getKey(), (long) pid)))
.collect(Collectors.toMap(Pair::getSecond, Pair::getFirst));
assertEquals(partitionIdToClassName, dbPartitionKeyToClassName);
// Fetch HostPartitionClassStorageStats
HostPartitionClassStorageStatsWrapper obtainedStats1 =
mySqlStore.queryHostPartitionClassStorageStatsByHost(hostname1, port1, partitionNameAndIds);
assertEquals(partitionClassStats1.getStats().getStorageStats(), obtainedStats1.getStats().getStorageStats());
HostPartitionClassStorageStatsWrapper obtainedStats2 =
mySqlStore.queryHostPartitionClassStorageStatsByHost(hostname2, port2, partitionNameAndIds);
assertEquals(partitionClassStats2.getStats().getStorageStats(), obtainedStats2.getStats().getStorageStats());
HostPartitionClassStorageStatsWrapper obtainedStats3 =
mySqlStore3.queryHostPartitionClassStorageStatsByHost(hostname3, port3, partitionNameAndIds);
assertEquals(partitionClassStats3.getStats().getStorageStats(), obtainedStats3.getStats().getStorageStats());
// Fetch StatsSnapshot
StatsWrapper obtainedStats = mySqlStore.queryPartitionClassStatsByHost(hostname1, port1, partitionNameAndIds);
assertEquals(
StorageStatsUtil.convertHostPartitionClassStorageStatsToStatsSnapshot(obtainedStats1.getStats(), false),
obtainedStats.getSnapshot());
mySqlStore3.shutdown();
}
/**
* Test methods to store, delete and fetch aggregated partition class stats.
* @throws Exception
*/
@Test
public void testAggregatedPartitionClassStats() throws Exception {
testHostPartitionClassStats();
Map<String, Set<Integer>> partitionNameAndIds = mySqlStore.queryPartitionNameAndIds();
AccountStatsMySqlStore mySqlStore3 = createAccountStatsMySqlStore(clusterName2, hostname3, port3);
// Now we should have partition class names and partition ids in database
// Construct an aggregated partition class report
StatsSnapshot aggregated =
TestUtils.makeAggregatedPartitionClassStats(partitionNameAndIds.keySet().toArray(new String[0]), 10, 10);
mySqlStore.storeAggregatedPartitionClassStats(aggregated);
partitionNameAndIds = mySqlStore3.queryPartitionNameAndIds();
StatsSnapshot aggregated3 =
TestUtils.makeAggregatedPartitionClassStats(partitionNameAndIds.keySet().toArray(new String[0]), 10, 10);
mySqlStore3.storeAggregatedPartitionClassStats(aggregated3);
StatsSnapshot obtained = mySqlStore.queryAggregatedPartitionClassStats();
assertEquals(aggregated, obtained);
assertEquals(mySqlStore.queryAggregatedPartitionClassStatsByClusterName("random-cluster").getSubMap().size(), 0);
StatsSnapshot obtained3 = mySqlStore3.queryAggregatedPartitionClassStats();
assertEquals(aggregated3, obtained3);
// Change one value and store it to mysql database again
StatsSnapshot newSnapshot = new StatsSnapshot(aggregated);
newSnapshot.getSubMap()
.get("default")
.getSubMap()
.get(Utils.partitionClassStatsAccountContainerKey((short) 1, (short) 1))
.setValue(1);
newSnapshot.updateValue();
mySqlStore.storeAggregatedPartitionClassStats(aggregated);
obtained = mySqlStore.queryAggregatedPartitionClassStats();
assertEquals(aggregated, obtained);
// Delete some account and container
newSnapshot = new StatsSnapshot(newSnapshot);
short accountId = (short) 1;
short containerId = (short) 1;
String accountContainerKey = Utils.partitionClassStatsAccountContainerKey(accountId, containerId);
for (String partitionClassName : partitionNameAndIds.keySet()) {
mySqlStore.deleteAggregatedPartitionClassStatsForAccountContainer(partitionClassName, accountId, containerId);
newSnapshot.getSubMap().get(partitionClassName).getSubMap().remove(accountContainerKey);
}
newSnapshot.updateValue();
obtained = mySqlStore.queryAggregatedPartitionClassStats();
assertEquals(newSnapshot, obtained);
mySqlStore3.shutdown();
}
@Test
public void testAggregatedPartitionClassStorageStats() throws Exception {
testHostPartitionClassStorageStats();
Map<String, Set<Integer>> partitionNameAndIds = mySqlStore.queryPartitionNameAndIds();
AccountStatsMySqlStore mySqlStore3 = createAccountStatsMySqlStore(clusterName2, hostname3, port3);
// Now we should have partition class names and partition ids in database
// Construct an aggregated partition class report
AggregatedPartitionClassStorageStats aggregatedStats = new AggregatedPartitionClassStorageStats(
StorageStatsUtilTest.generateRandomAggregatedPartitionClassStorageStats(
partitionNameAndIds.keySet().toArray(new String[0]), (short) 0, 10, 10, 10000L, 2, 10));
mySqlStore.storeAggregatedPartitionClassStorageStats(aggregatedStats);
partitionNameAndIds = mySqlStore3.queryPartitionNameAndIds();
AggregatedPartitionClassStorageStats aggregatedStats3 = new AggregatedPartitionClassStorageStats(
StorageStatsUtilTest.generateRandomAggregatedPartitionClassStorageStats(
partitionNameAndIds.keySet().toArray(new String[0]), (short) 0, 10, 10, 10000L, 2, 10));
mySqlStore3.storeAggregatedPartitionClassStorageStats(aggregatedStats3);
AggregatedPartitionClassStorageStats obtained = mySqlStore.queryAggregatedPartitionClassStorageStats();
assertEquals(aggregatedStats.getStorageStats(), obtained.getStorageStats());
assertEquals(
mySqlStore.queryAggregatedPartitionClassStorageStatsByClusterName("random-cluster").getStorageStats().size(),
0);
AggregatedPartitionClassStorageStats obtained3 = mySqlStore3.queryAggregatedPartitionClassStorageStats();
assertEquals(aggregatedStats3.getStorageStats(), obtained3.getStorageStats());
// Fetch StatsSnapshot
StatsSnapshot obtainedSnapshot = mySqlStore.queryAggregatedPartitionClassStats();
assertEquals(StorageStatsUtil.convertAggregatedPartitionClassStorageStatsToStatsSnapshot(obtained, false),
obtainedSnapshot);
// Change one value and store it to mysql database again
Map<String, Map<Short, Map<Short, ContainerStorageStats>>> newStorageStatsMap =
new HashMap<>(aggregatedStats.getStorageStats());
ContainerStorageStats origin = newStorageStatsMap.get("default").get((short) 1).get((short) 1);
newStorageStatsMap.get("default")
.get((short) 1)
.put((short) 1,
new ContainerStorageStats.Builder(origin).logicalStorageUsage(origin.getLogicalStorageUsage() + 1).build());
mySqlStore.storeAggregatedPartitionClassStorageStats(new AggregatedPartitionClassStorageStats(newStorageStatsMap));
obtained = mySqlStore.queryAggregatedPartitionClassStorageStats();
assertEquals(newStorageStatsMap, obtained.getStorageStats());
// Delete some account and container
short accountId = (short) 1;
short containerId = (short) 1;
for (String partitionClassName : partitionNameAndIds.keySet()) {
mySqlStore.deleteAggregatedPartitionClassStatsForAccountContainer(partitionClassName, accountId, containerId);
newStorageStatsMap.get(partitionClassName).get(accountId).remove(containerId);
}
obtained = mySqlStore.queryAggregatedPartitionClassStorageStats();
assertEquals(newStorageStatsMap, obtained.getStorageStats());
mySqlStore3.shutdown();
}
private AccountStatsMySqlStore createAccountStatsMySqlStore(String clusterName, String hostname, int port)
throws Exception {
Path localBackupFilePath = createTemporaryFile();
Properties configProps = Utils.loadPropsFromResource("accountstats_mysql.properties");
configProps.setProperty(ClusterMapConfig.CLUSTERMAP_CLUSTER_NAME, clusterName);
configProps.setProperty(ClusterMapConfig.CLUSTERMAP_HOST_NAME, hostname);
configProps.setProperty(ClusterMapConfig.CLUSTERMAP_DATACENTER_NAME, "dc1");
configProps.setProperty(ClusterMapConfig.CLUSTERMAP_PORT, String.valueOf(port));
configProps.setProperty(AccountStatsMySqlConfig.DOMAIN_NAMES_TO_REMOVE, ".github.com");
configProps.setProperty(AccountStatsMySqlConfig.UPDATE_BATCH_SIZE, String.valueOf(batchSize));
configProps.setProperty(AccountStatsMySqlConfig.POOL_SIZE, String.valueOf(5));
configProps.setProperty(AccountStatsMySqlConfig.LOCAL_BACKUP_FILE_PATH, localBackupFilePath.toString());
VerifiableProperties verifiableProperties = new VerifiableProperties(configProps);
return (AccountStatsMySqlStore) new AccountStatsMySqlStoreFactory(verifiableProperties,
new ClusterMapConfig(verifiableProperties), new MetricRegistry()).getAccountStatsStore();
}
  private static Path createTemporaryFile() throws IOException {
    // Create a temporary directory and return a path inside it for the local
    // backup file (the file itself is not created here).
    Path tempDir = Files.createTempDirectory("AccountStatsMySqlStoreTest");
    return tempDir.resolve("localbackup");
  }
private static StatsWrapper generateStatsWrapper(int numPartitions, int numAccounts, int numContainers,
StatsReportType reportType) {
Random random = new Random();
List<StatsSnapshot> storeSnapshots = new ArrayList<>();
for (int i = 0; i < numPartitions; i++) {
storeSnapshots.add(TestUtils.generateStoreStats(numAccounts, numContainers, random, reportType));
}
return TestUtils.generateNodeStats(storeSnapshots, 1000, reportType);
}
private static HostAccountStorageStatsWrapper generateHostAccountStorageStatsWrapper(int numPartitions,
int numAccounts, int numContainersPerAccount, StatsReportType reportType) {
HostAccountStorageStats hostAccountStorageStats = new HostAccountStorageStats(
StorageStatsUtilTest.generateRandomHostAccountStorageStats(numPartitions, numAccounts, numContainersPerAccount,
100000L, 2, 10));
StatsHeader statsHeader =
new StatsHeader(StatsHeader.StatsDescription.STORED_DATA_SIZE, 1000, numPartitions, numPartitions,
Collections.emptyList());
return new HostAccountStorageStatsWrapper(statsHeader, hostAccountStorageStats);
}
private void assertTableSize(AccountStatsMySqlStore mySqlStore, int expectedNumRows) throws SQLException {
int numRows = 0;
try (Connection connection = mySqlStore.getDataSource().getConnection()) {
try (Statement statement = connection.createStatement()) {
try (ResultSet resultSet = statement.executeQuery("SELECT * FROM " + AccountReportsDao.ACCOUNT_REPORTS_TABLE)) {
while (resultSet.next()) {
numRows++;
}
}
}
}
assertEquals(expectedNumRows, numRows);
}
private void assertTwoStatsSnapshots(StatsSnapshot snapshot1, StatsSnapshot snapshot2) {
assertEquals("Snapshot values are not equal", snapshot1.getValue(), snapshot2.getValue());
if (snapshot1.getSubMap() == null) {
assertNull(snapshot2.getSubMap());
} else {
assertEquals("Snapshot submap size mismatch", snapshot1.getSubMap().size(), snapshot2.getSubMap().size());
for (String key : snapshot1.getSubMap().keySet()) {
assertTrue(snapshot2.getSubMap().containsKey(key));
assertTwoStatsSnapshots(snapshot1.getSubMap().get(key), snapshot2.getSubMap().get(key));
}
}
}
private StatsWrapper convertAccountStatsToPartitionClassStats(StatsWrapper accountStats,
Map<String, String> partitionKeyToClassName) {
Map<String, StatsSnapshot> partitionClassSubMap = new HashMap<>();
StatsSnapshot originHostStats = accountStats.getSnapshot();
for (String partitionKey : originHostStats.getSubMap().keySet()) {
StatsSnapshot originPartitionStats = originHostStats.getSubMap().get(partitionKey);
String currentClassName = partitionKeyToClassName.get(partitionKey);
StatsSnapshot partitionClassStats =
partitionClassSubMap.computeIfAbsent(currentClassName, k -> new StatsSnapshot(0L, new HashMap<>()));
Map<String, StatsSnapshot> accountContainerSubMap = new HashMap<>();
for (String accountKey : originPartitionStats.getSubMap().keySet()) {
for (Map.Entry<String, StatsSnapshot> containerEntry : originPartitionStats.getSubMap()
.get(accountKey)
.getSubMap()
.entrySet()) {
String containerKey = containerEntry.getKey();
StatsSnapshot containerStats = new StatsSnapshot(containerEntry.getValue());
String accountContainerKey =
Utils.partitionClassStatsAccountContainerKey(Utils.accountIdFromStatsAccountKey(accountKey),
Utils.containerIdFromStatsContainerKey(containerKey));
accountContainerSubMap.put(accountContainerKey, containerStats);
}
}
long accountContainerValue = accountContainerSubMap.values().stream().mapToLong(StatsSnapshot::getValue).sum();
StatsSnapshot partitionStats = new StatsSnapshot(accountContainerValue, accountContainerSubMap);
partitionClassStats.getSubMap().put(partitionKey, partitionStats);
partitionClassStats.setValue(partitionClassStats.getValue() + accountContainerValue);
}
return new StatsWrapper(new StatsHeader(accountStats.getHeader()),
new StatsSnapshot(originHostStats.getValue(), partitionClassSubMap));
}
private HostPartitionClassStorageStatsWrapper convertHostAccountStorageStatsToHostPartitionClassStorageStats(
HostAccountStorageStatsWrapper accountStatsWrapper, Map<Long, String> partitionIdToClassName) {
HostPartitionClassStorageStats hostPartitionClassStorageStats = new HostPartitionClassStorageStats();
Map<Long, Map<Short, Map<Short, ContainerStorageStats>>> storageStats =
accountStatsWrapper.getStats().getStorageStats();
for (long partitionId : storageStats.keySet()) {
Map<Short, Map<Short, ContainerStorageStats>> accountStorageStatsMap = storageStats.get(partitionId);
String partitionClassName = partitionIdToClassName.get(partitionId);
for (short accountId : accountStorageStatsMap.keySet()) {
accountStorageStatsMap.get(accountId)
.values()
.forEach(containerStats -> hostPartitionClassStorageStats.addContainerStorageStats(partitionClassName,
partitionId, accountId, containerStats));
}
}
return new HostPartitionClassStorageStatsWrapper(new StatsHeader(accountStatsWrapper.getHeader()),
hostPartitionClassStorageStats);
}
}
| cgtz/ambry | ambry-mysql/src/integration-test/java/com/github/ambry/accountstats/AccountStatsMySqlStoreIntegrationTest.java | Java | apache-2.0 | 46,523 |
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<HTML
><HEAD
><TITLE
>Planner/Optimizer</TITLE
><META
NAME="GENERATOR"
CONTENT="Modular DocBook HTML Stylesheet Version 1.79"><LINK
REV="MADE"
HREF="mailto:[email protected]"><LINK
REL="HOME"
TITLE="PostgreSQL 9.2.8 Documentation"
HREF="index.html"><LINK
REL="UP"
TITLE="Overview of PostgreSQL Internals"
HREF="overview.html"><LINK
REL="PREVIOUS"
TITLE="The PostgreSQL Rule System"
HREF="rule-system.html"><LINK
REL="NEXT"
TITLE="Executor"
HREF="executor.html"><LINK
REL="STYLESHEET"
TYPE="text/css"
HREF="stylesheet.css"><META
HTTP-EQUIV="Content-Type"
CONTENT="text/html; charset=ISO-8859-1"><META
NAME="creation"
CONTENT="2014-03-17T19:46:29"></HEAD
><BODY
CLASS="SECT1"
><DIV
CLASS="NAVHEADER"
><TABLE
SUMMARY="Header navigation table"
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TH
COLSPAN="5"
ALIGN="center"
VALIGN="bottom"
><A
HREF="index.html"
>PostgreSQL 9.2.8 Documentation</A
></TH
></TR
><TR
><TD
WIDTH="10%"
ALIGN="left"
VALIGN="top"
><A
TITLE="The PostgreSQL Rule System"
HREF="rule-system.html"
ACCESSKEY="P"
>Prev</A
></TD
><TD
WIDTH="10%"
ALIGN="left"
VALIGN="top"
><A
HREF="overview.html"
ACCESSKEY="U"
>Up</A
></TD
><TD
WIDTH="60%"
ALIGN="center"
VALIGN="bottom"
>Chapter 44. Overview of PostgreSQL Internals</TD
><TD
WIDTH="20%"
ALIGN="right"
VALIGN="top"
><A
TITLE="Executor"
HREF="executor.html"
ACCESSKEY="N"
>Next</A
></TD
></TR
></TABLE
><HR
ALIGN="LEFT"
WIDTH="100%"></DIV
><DIV
CLASS="SECT1"
><H1
CLASS="SECT1"
><A
NAME="PLANNER-OPTIMIZER"
>44.5. Planner/Optimizer</A
></H1
><P
> The task of the <I
CLASS="FIRSTTERM"
>planner/optimizer</I
> is to
create an optimal execution plan. A given SQL query (and hence, a
query tree) can be actually executed in a wide variety of
different ways, each of which will produce the same set of
results. If it is computationally feasible, the query optimizer
will examine each of these possible execution plans, ultimately
selecting the execution plan that is expected to run the fastest.
</P
><DIV
CLASS="NOTE"
><BLOCKQUOTE
CLASS="NOTE"
><P
><B
>Note: </B
> In some situations, examining each possible way in which a query
can be executed would take an excessive amount of time and memory
space. In particular, this occurs when executing queries
involving large numbers of join operations. In order to determine
a reasonable (not necessarily optimal) query plan in a reasonable amount
of time, <SPAN
CLASS="PRODUCTNAME"
>PostgreSQL</SPAN
> uses a <I
CLASS="FIRSTTERM"
>Genetic
Query Optimizer</I
> (see <A
HREF="geqo.html"
>Chapter 51</A
>) when the number of joins
exceeds a threshold (see <A
HREF="runtime-config-query.html#GUC-GEQO-THRESHOLD"
>geqo_threshold</A
>).
</P
></BLOCKQUOTE
></DIV
><P
> The planner's search procedure actually works with data structures
called <I
CLASS="FIRSTTERM"
>paths</I
>, which are simply cut-down representations of
plans containing only as much information as the planner needs to make
its decisions. After the cheapest path is determined, a full-fledged
<I
CLASS="FIRSTTERM"
>plan tree</I
> is built to pass to the executor. This represents
the desired execution plan in sufficient detail for the executor to run it.
In the rest of this section we'll ignore the distinction between paths
and plans.
</P
><DIV
CLASS="SECT2"
><H2
CLASS="SECT2"
><A
NAME="AEN87651"
>44.5.1. Generating Possible Plans</A
></H2
><P
> The planner/optimizer starts by generating plans for scanning each
individual relation (table) used in the query. The possible plans
are determined by the available indexes on each relation.
There is always the possibility of performing a
sequential scan on a relation, so a sequential scan plan is always
created. Assume an index is defined on a
relation (for example a B-tree index) and a query contains the
restriction
<TT
CLASS="LITERAL"
>relation.attribute OPR constant</TT
>. If
<TT
CLASS="LITERAL"
>relation.attribute</TT
> happens to match the key of the B-tree
index and <TT
CLASS="LITERAL"
>OPR</TT
> is one of the operators listed in
the index's <I
CLASS="FIRSTTERM"
>operator class</I
>, another plan is created using
the B-tree index to scan the relation. If there are further indexes
present and the restrictions in the query happen to match a key of an
index, further plans will be considered. Index scan plans are also
generated for indexes that have a sort ordering that can match the
query's <TT
CLASS="LITERAL"
>ORDER BY</TT
> clause (if any), or a sort ordering that
might be useful for merge joining (see below).
</P
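><P
>    The following is a small, self-contained sketch in Java of the rule
    just described: a sequential-scan path is always generated, plus one
    index-scan path for every index/restriction pair whose attribute and
    operator match. This is illustration only; all names are invented, and
    <SPAN
CLASS="PRODUCTNAME"
>PostgreSQL</SPAN
>'s actual path generation is C code in the planner.
   </P
><PRE
CLASS="PROGRAMLISTING"
>import java.util.ArrayList;
import java.util.List;
import java.util.Set;

public class ScanPathSketch {
    record Index(String keyAttribute, Set&lt;String> operatorClass) {}
    record Restriction(String attribute, String operator) {} // attribute OPR constant

    static List&lt;String> scanPaths(String relation, List&lt;Index> indexes,
                                  List&lt;Restriction> quals) {
        List&lt;String> paths = new ArrayList&lt;>();
        paths.add("SeqScan(" + relation + ")"); // always possible
        for (Index idx : indexes) {
            for (Restriction q : quals) {
                // the restriction's attribute matches the index key and
                // its operator appears in the index's operator class
                if (idx.keyAttribute().equals(q.attribute())
                        &amp;&amp; idx.operatorClass().contains(q.operator())) {
                    paths.add("IndexScan(" + relation + "." + q.attribute()
                            + " " + q.operator() + " const)");
                }
            }
        }
        return paths;
    }

    public static void main(String[] args) {
        System.out.println(scanPaths("accounts",
                List.of(new Index("id", Set.of("=", "&lt;", ">"))),
                List.of(new Restriction("id", "="))));
        // prints [SeqScan(accounts), IndexScan(accounts.id = const)]
    }
}</PRE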
><P
> If the query requires joining two or more relations,
plans for joining relations are considered
after all feasible plans have been found for scanning single relations.
The three available join strategies are:
<P
></P
></P><UL
><LI
><P
> <I
CLASS="FIRSTTERM"
>nested loop join</I
>: The right relation is scanned
once for every row found in the left relation. This strategy
is easy to implement but can be very time consuming. (However,
if the right relation can be scanned with an index scan, this can
be a good strategy. It is possible to use values from the current
row of the left relation as keys for the index scan of the right.)
</P
></LI
><LI
><P
> <I
CLASS="FIRSTTERM"
>merge join</I
>: Each relation is sorted on the join
attributes before the join starts. Then the two relations are
scanned in parallel, and matching rows are combined to form
join rows. This kind of join is more
attractive because each relation has to be scanned only once.
The required sorting might be achieved either by an explicit sort
step, or by scanning the relation in the proper order using an
index on the join key.
</P
></LI
><LI
><P
> <I
CLASS="FIRSTTERM"
>hash join</I
>: The right relation is first scanned
and loaded into a hash table, using its join attributes as hash keys.
Next the left relation is scanned and the
appropriate values of every row found are used as hash keys to
locate the matching rows in the table.
</P
></LI
></UL
><P>
</P
><P
> When the query involves more than two relations, the final result
must be built up by a tree of join steps, each with two inputs.
The planner examines different possible join sequences to find the
cheapest one.
</P
><P
> If the query uses fewer than <A
HREF="runtime-config-query.html#GUC-GEQO-THRESHOLD"
>geqo_threshold</A
>
relations, a near-exhaustive search is conducted to find the best
join sequence. The planner preferentially considers joins between any
two relations for which there exists a corresponding join clause in the
<TT
CLASS="LITERAL"
>WHERE</TT
> qualification (i.e., for
which a restriction like <TT
CLASS="LITERAL"
>where rel1.attr1=rel2.attr2</TT
>
exists). Join pairs with no join clause are considered only when there
is no other choice, that is, a particular relation has no available
join clauses to any other relation. All possible plans are generated for
every join pair considered by the planner, and the one that is
(estimated to be) the cheapest is chosen.
</P
><P
> When <TT
CLASS="VARNAME"
>geqo_threshold</TT
> is exceeded, the join
sequences considered are determined by heuristics, as described
in <A
HREF="geqo.html"
>Chapter 51</A
>. Otherwise the process is the same.
</P
><P
> The finished plan tree consists of sequential or index scans of
the base relations, plus nested-loop, merge, or hash join nodes as
needed, plus any auxiliary steps needed, such as sort nodes or
aggregate-function calculation nodes. Most of these plan node
types have the additional ability to do <I
CLASS="FIRSTTERM"
>selection</I
>
(discarding rows that do not meet a specified Boolean condition)
and <I
CLASS="FIRSTTERM"
>projection</I
> (computation of a derived column set
based on given column values, that is, evaluation of scalar
expressions where needed). One of the responsibilities of the
planner is to attach selection conditions from the
<TT
CLASS="LITERAL"
>WHERE</TT
> clause and computation of required
output expressions to the most appropriate nodes of the plan
tree.
</P
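><P
>    The plan finally chosen for a given query can be inspected with the
   <TT
CLASS="LITERAL"
>EXPLAIN</TT
> command.  The output below is purely illustrative (the table names
   and cost figures are invented), but it shows the shape of a finished
   plan tree: a hash join over two sequential scans, with a hash node
   as an auxiliary step:
</P
><PRE
CLASS="PROGRAMLISTING"
>EXPLAIN SELECT * FROM t1, t2 WHERE t1.a = t2.b;

                           QUERY PLAN
----------------------------------------------------------------
 Hash Join  (cost=230.47..713.98 rows=101 width=488)
   Hash Cond: (t1.a = t2.b)
   ->  Seq Scan on t1  (cost=0.00..458.00 rows=10000 width=244)
   ->  Hash  (cost=229.20..229.20 rows=101 width=244)
         ->  Seq Scan on t2  (cost=0.00..229.20 rows=101 width=244)</PRE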
></DIV
></DIV
><DIV
CLASS="NAVFOOTER"
><HR
ALIGN="LEFT"
WIDTH="100%"><TABLE
SUMMARY="Footer navigation table"
WIDTH="100%"
BORDER="0"
CELLPADDING="0"
CELLSPACING="0"
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
><A
HREF="rule-system.html"
ACCESSKEY="P"
>Prev</A
></TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="index.html"
ACCESSKEY="H"
>Home</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
><A
HREF="executor.html"
ACCESSKEY="N"
>Next</A
></TD
></TR
><TR
><TD
WIDTH="33%"
ALIGN="left"
VALIGN="top"
>The <SPAN
CLASS="PRODUCTNAME"
>PostgreSQL</SPAN
> Rule System</TD
><TD
WIDTH="34%"
ALIGN="center"
VALIGN="top"
><A
HREF="overview.html"
ACCESSKEY="U"
>Up</A
></TD
><TD
WIDTH="33%"
ALIGN="right"
VALIGN="top"
>Executor</TD
></TR
></TABLE
></DIV
></BODY
></HTML
> | ArcherCraftStore/ArcherVMPeridot | pgsql/doc/postgresql/html/planner-optimizer.html | HTML | apache-2.0 | 9,394 |
/******************** (C) COPYRIGHT 2012 WildFire Team **************************
 * File name   : main.c
 * Description : Winbond 2M serial flash test; the test messages are printed
 *               to a PC terminal (HyperTerminal) through USART1.
 * Platform    : WildFire STM32 development board
 * Library     : ST3.5.0
 *
 * Author      : wildfire team
 * Forum       : http://www.amobbs.com/forum-1008-1.html
 * Taobao      : http://firestm32.taobao.com
**********************************************************************************/
#include "stm32f10x.h"
#include "usart1.h"
#include "spi_flash.h"
typedef enum { FAILED = 0, PASSED = !FAILED} TestStatus;
/* Get the length of a buffer */
#define TxBufferSize1 (countof(TxBuffer1) - 1)
#define RxBufferSize1 (countof(TxBuffer1) - 1)
#define countof(a) (sizeof(a) / sizeof(*(a)))
#define BufferSize (countof(Tx_Buffer)-1)
#define FLASH_WriteAddress 0x00000
#define FLASH_ReadAddress FLASH_WriteAddress
#define FLASH_SectorToErase FLASH_WriteAddress
#define sFLASH_ID 0xEF3015 //W25X16
//#define sFLASH_ID 0xEF4015 //W25Q16
/* Initialize the transmit buffer (Chinese greeting string used as test data) */
uint8_t Tx_Buffer[] = " 感谢您选用野火stm32开发板\r\n http://firestm32.taobao.com";
uint8_t Rx_Buffer[BufferSize];
__IO uint32_t DeviceID = 0;
__IO uint32_t FlashID = 0;
__IO TestStatus TransferStatus1 = FAILED;
// Function prototype declarations
void Delay(__IO uint32_t nCount);
TestStatus Buffercmp(uint8_t* pBuffer1, uint8_t* pBuffer2, uint16_t BufferLength);
/*
 * Function name: main
 * Description  : main function
 * Input        : none
 * Output       : none
 */
int main(void)
{
/* Configure USART1 as 115200 8-N-1 */
USART1_Config();
printf("\r\n This is a 2M serial flash (W25X16) test \r\n");
/* Initialize the 2M serial flash W25X16 */
SPI_FLASH_Init();
/* Get SPI Flash Device ID */
DeviceID = SPI_FLASH_ReadDeviceID();
Delay( 200 );
/* Get SPI Flash ID */
FlashID = SPI_FLASH_ReadID();
printf("\r\n FlashID is 0x%X, Manufacturer Device ID is 0x%X\r\n", FlashID, DeviceID);
/* Check the SPI Flash ID */
if (FlashID == sFLASH_ID) /* #define sFLASH_ID 0xEF3015 */
{
printf("\r\n 检测到华邦串行flash W25X16 !\r\n");
/* Erase SPI FLASH Sector to write on */
SPI_FLASH_SectorErase(FLASH_SectorToErase);
/* Write the transmit buffer data to the flash */
SPI_FLASH_BufferWrite(Tx_Buffer, FLASH_WriteAddress, BufferSize);
printf("\r\n Data written: %s \r\n", Tx_Buffer);
/* Read the data just written back into the receive buffer */
SPI_FLASH_BufferRead(Rx_Buffer, FLASH_ReadAddress, BufferSize);
printf("\r\n Data read: %s \r\n", Rx_Buffer);
/* Check whether the written data and the read data match */
TransferStatus1 = Buffercmp(Tx_Buffer, Rx_Buffer, BufferSize);
if( PASSED == TransferStatus1 )
{
printf("\r\n 2M串行flash(W25X16)测试成功!\n\r");
}
else
{
printf("\r\n 2M串行flash(W25X16)测试失败!\n\r");
}
}// if (FlashID == sFLASH_ID)
else
{
printf("\r\n 获取不到 W25X16 ID!\n\r");
}
SPI_Flash_PowerDown();
while(1);
}
/*
 * Function name: Buffercmp
 * Description  : Compare whether the data in two buffers are equal
 * Input        : -pBuffer1: pointer to the source buffer
 *                -pBuffer2: pointer to the destination buffer
 *                -BufferLength: length of the buffers
 * Output       : none
 * Return       : -PASSED: pBuffer1 is equal to pBuffer2
 *                -FAILED: pBuffer1 differs from pBuffer2
 */
TestStatus Buffercmp(uint8_t* pBuffer1, uint8_t* pBuffer2, uint16_t BufferLength)
{
while(BufferLength--)
{
if(*pBuffer1 != *pBuffer2)
{
return FAILED;
}
pBuffer1++;
pBuffer2++;
}
return PASSED;
}
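/* Crude busy-wait delay; the loop count is not calibrated to wall-clock time */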
void Delay(__IO uint32_t nCount)
{
for(; nCount != 0; nCount--);
}
/******************* (C) COPYRIGHT 2012 WildFire Team *****END OF FILE************/
| sdlylshl/Gateway | Gateway_STM32/user/System/SPI/main.c | C | apache-2.0 | 3,926 |
<?xml version='1.0' encoding='UTF-8'?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
<html>
<head>
<title>Conductor - ScalaTest 2.1.7 - org.scalatest.concurrent.Conductor</title>
<meta name="description" content="Conductor - ScalaTest 2.1.7 - org.scalatest.concurrent.Conductor" />
<meta name="keywords" content="Conductor ScalaTest 2.1.7 org.scalatest.concurrent.Conductor" />
<meta http-equiv="content-type" content="text/html; charset=UTF-8" />
<link href="../../../lib/template.css" media="screen" type="text/css" rel="stylesheet" />
<link href="../../../lib/diagrams.css" media="screen" type="text/css" rel="stylesheet" id="diagrams-css" />
<script type="text/javascript" src="../../../lib/jquery.js" id="jquery-js"></script>
<script type="text/javascript" src="../../../lib/jquery-ui.js"></script>
<script type="text/javascript" src="../../../lib/template.js"></script>
<script type="text/javascript" src="../../../lib/tools.tooltip.js"></script>
<script type="text/javascript">
if(top === self) {
var url = '../../../index.html';
var hash = 'org.scalatest.concurrent.Conductor';
var anchor = window.location.hash;
var anchor_opt = '';
if (anchor.length >= 1)
anchor_opt = '@' + anchor.substring(1);
window.location.href = url + '#' + hash + anchor_opt;
}
</script>
<script>
(function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){
(i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o),
m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m)
})(window,document,'script','//www.google-analytics.com/analytics.js','ga');
ga('create', 'UA-71294502-3', 'auto');
ga('send', 'pageview');
</script>
</head>
<body class="type">
<!-- Top of doc.scalatest.org [javascript] -->
<script type="text/javascript">
var rnd = window.rnd || Math.floor(Math.random()*10e6);
var pid204546 = window.pid204546 || rnd;
var plc204546 = window.plc204546 || 0;
var abkw = window.abkw || '';
var absrc = 'http://ab167933.adbutler-ikon.com/adserve/;ID=167933;size=468x60;setID=204546;type=js;sw='+screen.width+';sh='+screen.height+';spr='+window.devicePixelRatio+';kw='+abkw+';pid='+pid204546+';place='+(plc204546++)+';rnd='+rnd+';click=CLICK_MACRO_PLACEHOLDER';
document.write('<scr'+'ipt src="'+absrc+'" type="text/javascript"></scr'+'ipt>');
</script>
<div id="definition">
<img src="../../../lib/class_big.png" />
<p id="owner"><a href="../../package.html" class="extype" name="org">org</a>.<a href="../package.html" class="extype" name="org.scalatest">scalatest</a>.<a href="package.html" class="extype" name="org.scalatest.concurrent">concurrent</a></p>
<h1>Conductor</h1>
</div>
<h4 id="signature" class="signature">
<span class="modifier_kind">
<span class="modifier">final </span>
<span class="kind">class</span>
</span>
<span class="symbol">
<span class="name deprecated" title="Deprecated: org.scalatest.concurrent.Conductor has been deprecated and will be removed in a future version of ScalaTest. Please mix in trait Conductors, which now defines Conductor, instead of using Conductor directly.">Conductor</span><span class="result"> extends <span class="extype" name="scala.AnyRef">AnyRef</span></span>
</span>
</h4>
<div id="comment" class="fullcommenttop"><div class="comment cmt"><p><strong><code>org.scalatest.concurrent.Conductor</code> has been deprecated and will
be removed in a future version of ScalaTest. Please mix in or import the members
of trait <a href="Conductors.html"><code>Conductors</code></a>, into which <code>Conductor</code> has been moved, instead
of using this class directly.</strong></p><p><strong>The reason <code>Conductor</code> was moved into trait <code>Conductors</code>
was so that it can extend trait
<a href="PatienceConfiguration.html"><code>PatienceConfiguration</code></a>, which was
introduced in ScalaTest 1.8. This will make <code>Conductor</code> configurable in a
way consistent with traits <code>Eventually</code> and <code>AsyncAssertions</code>
(both of which were also introduced in ScalaTest 1.8), and scalable with the
<code>scaled</code> method of trait
<a href="ScaledTimeSpans.html"><code>ScaledTimeSpans</code></a>.</strong></p><p>Class that facilitates the testing of classes, traits, and libraries designed
to be used by multiple threads concurrently.</p><p>A <code>Conductor</code> conducts a multi-threaded scenario by maintaining
a clock of "beats." Beats are numbered starting with 0. You can ask a
<code>Conductor</code> to run threads that interact with the class, trait,
or library (the <em>subject</em>)
you want to test. A thread can call the <code>Conductor</code>'s
<code>waitForBeat</code> method, which will cause the thread to block
until that beat has been reached. The <code>Conductor</code> will advance
the beat only when all threads participating in the test are blocked. By
tying the timing of thread activities to specific beats, you can write
tests for concurrent systems that have deterministic interleavings of
threads.</p><p>A <code>Conductor</code> object has a three-phase lifecycle. It begins its life
in the <em>setup</em> phase. During this phase, you can start threads by
invoking the <code>thread</code> method on the <code>Conductor</code>.
When <code>conduct</code> is invoked on a <code>Conductor</code>, it enters
the <em>conducting</em> phase. During this phase it conducts the one multi-threaded
scenario it was designed to conduct. After all participating threads have exited, either by
returning normally or throwing an exception, the <code>conduct</code> method
will complete, either by returning normally or throwing an exception. As soon as
the <code>conduct</code> method completes, the <code>Conductor</code>
enters its <em>defunct</em> phase. Once the <code>Conductor</code> has conducted
a multi-threaded scenario, it is defunct and can't be reused. To run the same test again,
you'll need to create a new instance of <code>Conductor</code>.</p><p>Here's an example of the use of <code>Conductor</code> to test the <code>ArrayBlockingQueue</code>
class from <code>java.util.concurrent</code>:</p><p><pre class="stHighlighted">
<span class="stReserved">import</span> org.scalatest.fixture.FunSuite
<span class="stReserved">import</span> org.scalatest.matchers.ShouldMatchers
<span class="stReserved">import</span> java.util.concurrent.ArrayBlockingQueue
<br /><span class="stReserved">class</span> <span class="stType">ArrayBlockingQueueSuite</span> <span class="stReserved">extends</span> <span class="stType">FunSuite</span> <span class="stReserved">with</span> <span class="stType">ShouldMatchers</span> {
<br /> test(<span class="stQuotedString">"calling put on a full queue blocks the producer thread"</span>) {
<br /> <span class="stReserved">val</span> conductor = <span class="stReserved">new</span> <span class="stType">Conductor</span>
<span class="stReserved">import</span> conductor._
<br /> <span class="stReserved">val</span> buf = <span class="stReserved">new</span> <span class="stType">ArrayBlockingQueue[Int]</span>(<span class="stLiteral">1</span>)
<br /> thread(<span class="stQuotedString">"producer"</span>) {
buf put <span class="stLiteral">42</span>
buf put <span class="stLiteral">17</span>
beat should be (<span class="stLiteral">1</span>)
}
<br /> thread(<span class="stQuotedString">"consumer"</span>) {
waitForBeat(<span class="stLiteral">1</span>)
buf.take should be (<span class="stLiteral">42</span>)
buf.take should be (<span class="stLiteral">17</span>)
}
<br /> whenFinished {
buf should be (<span class="stQuotedString">'empty</span>)
}
}
}
</pre></p><p>When the test shown is run, it will create one thread named <em>producer</em> and another named
<em>consumer</em>. The producer thread will eventually execute the code passed as a by-name
parameter to <code>thread("producer")</code>:</p><p><pre class="stHighlighted">
buf put <span class="stLiteral">42</span>
buf put <span class="stLiteral">17</span>
beat should be (<span class="stLiteral">1</span>)
</pre></p><p>Similarly, the consumer thread will eventually execute the code passed as a by-name parameter
to <code>thread("consumer")</code>:</p><p><pre class="stHighlighted">
waitForBeat(<span class="stLiteral">1</span>)
buf.take should be (<span class="stLiteral">42</span>)
buf.take should be (<span class="stLiteral">17</span>)
</pre></p><p>The <code>thread</code> method invocations will create the threads and start the threads, but will not immediately
execute the by-name parameter passed to them. They will first block, waiting for the <code>Conductor</code>
to give them a green light to proceed.</p><p>The next call in the test is <code>whenFinished</code>. This method will first call <code>conduct</code> on
the <code>Conductor</code>, which will wait until all threads that were created (in this case, producer and consumer) are
at the "starting line", <em>i.e.</em>, they have all started and are blocked, waiting on the green light.
The <code>conduct</code> method will then give these threads the green light and they will
all start executing their blocks concurrently.</p><p>When the threads are given the green light, the beat is 0. The first thing the producer thread does is put 42
into the queue. As the queue is empty at this point, this succeeds. The producer thread next attempts to put a 17
into the queue, but because the queue has size 1, this can't succeed until the consumer thread has read the 42
from the queue. This hasn't happened yet, so producer blocks. Meanwhile, the consumer thread's first act is to
call <code>waitForBeat(1)</code>. Because the beat starts out at 0, this call will block the consumer thread.
As a result, once the producer thread has executed <code>buf put 17</code> and the consumer thread has executed
<code>waitForBeat(1)</code>, both threads will be blocked.</p><p>The <code>Conductor</code> maintains a clock that wakes up periodically and checks to see if all threads
participating in the multi-threaded scenario (in this case, producer and consumer) are blocked. If so, it
increments the beat. Thus sometime later the beat will be incremented, from 0 to 1. Because consumer was
waiting for beat 1, it will wake up (<em>i.e.</em>, the <code>waitForBeat(1)</code> call will return) and
execute the next line of code in its block, <code>buf.take should be (42)</code>. This will succeed, because
the producer thread had previously (during beat 0) put 42 into the queue. This act will also make
producer runnable again, because it was blocked on the second <code>put</code>, which was waiting for another
thread to read that 42.</p><p>Now both threads are unblocked and able to execute their next statement. The order is
non-deterministic, and can even be simultaneous if running on multiple cores. If the <code>consumer</code> thread
happens to execute <code>buf.take should be (17)</code> first, it will block (<code>buf.take</code> will not return), because the queue is
at that point empty. At some point later, the producer thread will execute <code>buf put 17</code>, which will
unblock the consumer thread. Again both threads will be runnable and the order non-deterministic and
possibly simultaneous. The producer thread may charge ahead and run its next statement, <code>beat should be (1)</code>.
This will succeed because the beat is indeed 1 at this point. As this is the last statement in the producer's block,
the producer thread will exit normally (it won't throw an exception). At some point later the consumer thread will
be allowed to complete its last statement, the <code>buf.take</code> call will return 17. The consumer thread will
execute <code>17 should be (17)</code>. This will succeed and as this was the last statement in its block, the consumer will return
normally.</p><p>If either the producer or consumer thread had completed abruptly with an exception, the <code>conduct</code> method
(which was called by <code>whenFinished</code>) would have completed abruptly with an exception to indicate the test
failed. However, since both threads returned normally, <code>conduct</code> will return. Because <code>conduct</code> doesn't
throw an exception, <code>whenFinished</code> will execute the block of code passed as a by-name parameter to it: <code>buf should be ('empty)</code>.
This will succeed, because the queue is indeed empty at this point. The <code>whenFinished</code> method will then return, and
because the <code>whenFinished</code> call was the last statement in the test and it didn't throw an exception, the test completes successfully.</p><p>This test tests <code>ArrayBlockingQueue</code>, to make sure it works as expected. If there were a bug in <code>ArrayBlockingQueue</code>
such as a <code>put</code> called on a full queue didn't block, but instead overwrote the previous value, this test would detect
it. However, if there were a bug in <code>ArrayBlockingQueue</code> such that a call to <code>take</code> called on an empty queue
never blocked and always returned 0, this test might not detect it. The reason is that whether the consumer thread will ever call
<code>take</code> on an empty queue during this test is non-deterministic. It depends on how the threads get scheduled during beat 1.
What is deterministic in this test, because the consumer thread blocks during beat 0, is that the producer thread will definitely
attempt to write to a full queue. To make sure the other scenario is tested, you'd need a different test:</p><p><pre class="stHighlighted">
test(<span class="stQuotedString">"calling take on an empty queue blocks the consumer thread"</span>) {
<br /> <span class="stReserved">val</span> conductor = <span class="stReserved">new</span> <span class="stType">Conductor</span>
<span class="stReserved">import</span> conductor._
<br /> <span class="stReserved">val</span> buf = <span class="stReserved">new</span> <span class="stType">ArrayBlockingQueue[Int]</span>(<span class="stLiteral">1</span>)
<br /> thread(<span class="stQuotedString">"producer"</span>) {
waitForBeat(<span class="stLiteral">1</span>)
buf put <span class="stLiteral">42</span>
buf put <span class="stLiteral">17</span>
}
<br /> thread(<span class="stQuotedString">"consumer"</span>) {
buf.take should be (<span class="stLiteral">42</span>)
buf.take should be (<span class="stLiteral">17</span>)
beat should be (<span class="stLiteral">1</span>)
}
<br /> whenFinished {
buf should be (<span class="stQuotedString">'empty</span>)
}
}
</pre></p><p>In this test, the producer thread will block, waiting for beat 1. The consumer thread will invoke <code>buf.take</code>
as its first act. This will block, because the queue is empty. Because both threads are blocked, the <code>Conductor</code>
will at some point later increment the beat to 1. This will awaken the producer thread. It will return from its
<code>waitForBeat(1)</code> call and execute <code>buf put 42</code>. This will unblock the consumer thread, which will
take the 42, and so on.</p><p>The problem that <code>Conductor</code> is designed to address is the difficulty, caused by the non-deterministic nature
of thread scheduling, of testing classes, traits, and libraries that are intended to be used by multiple threads.
If you just create a test in which one thread reads from an <code>ArrayBlockingQueue</code> and
another writes to it, you can't be sure that you have tested all possible interleavings of threads, no matter
how many times you run the test. The purpose of <code>Conductor</code>
is to enable you to write tests with deterministic interleavings of threads. If you write one test for each possible
interleaving of threads, then you can be sure you have all the scenarios tested. The two tests shown here, for example,
ensure that both the scenario in which a producer thread tries to write to a full queue and the scenario in which a
consumer thread tries to take from an empty queue are tested.</p><p>Class <code>Conductor</code> was inspired by the
<a href="http://www.cs.umd.edu/projects/PL/multithreadedtc/">MultithreadedTC project</a>,
created by Bill Pugh and Nat Ayewah of the University of Maryland, and was brought to ScalaTest with major
contributions by Josh Cough.</p><p>Although useful, bear in mind that a <code>Conductor</code>'s results are not guaranteed to be
accurate 100% of the time. The reason is that it uses <code>java.lang.Thread</code>'s <code>getState</code> method to
decide when to advance the beat. This type of use is advised against in the Javadoc documentation for
<code>getState</code>, which says, "This method is designed for use in monitoring of the system state, not for
synchronization." In short, sometimes the return value of <code>getState</code> may be inacurrate, which in turn means
that sometimes a <code>Conductor</code> may decide to advance the beat too early. The upshot is that while <code>Conductor</code>
can be quite helpful in developing a thread-safe class initially, once the class is done you may not want to run the resulting tests
all the time as regression tests because they may generate occasional false negatives. (<code>Conductor</code> should never generate
a false positive, though, so if a test passes you can believe that. If the test fails consistently, you can believe that as well. But
if a test fails only occasionally, it may or may not indicate an actual concurrency bug.)</p></div><dl class="attributes block"> <dt>Annotations</dt><dd>
<span class="name">@deprecated</span>
</dd><dt>Deprecated</dt><dd class="cmt"><p>org.scalatest.concurrent.Conductor has been deprecated and will be removed in a future version of ScalaTest. Please mix in trait Conductors, which now defines Conductor, instead of using Conductor directly.</p></dd><dt>Source</dt><dd><a href="https://github.com/scalatest/scalatest/tree/release-2.1.7-for-scala-2.10/src/main/scala/org/scalatest/concurrent/Conductor.scala" target="_blank">Conductor.scala</a></dd></dl><div class="toggleContainer block">
<span class="toggle">Linear Supertypes</span>
<div class="superTypes hiddenContent"><span class="extype" name="scala.AnyRef">AnyRef</span>, <span class="extype" name="scala.Any">Any</span></div>
</div></div>
<div id="mbrsel">
<div id="textfilter"><span class="pre"></span><span class="input"><input id="mbrsel-input" type="text" accesskey="/" /></span><span class="post"></span></div>
<div id="order">
<span class="filtertype">Ordering</span>
<ol>
<li class="alpha in"><span>Alphabetic</span></li>
<li class="inherit out"><span>By inheritance</span></li>
</ol>
</div>
<div id="ancestors">
<span class="filtertype">Inherited<br />
</span>
<ol id="linearization">
<li class="in" name="org.scalatest.concurrent.Conductor"><span>Conductor</span></li><li class="in" name="scala.AnyRef"><span>AnyRef</span></li><li class="in" name="scala.Any"><span>Any</span></li>
</ol>
</div><div id="ancestors">
<span class="filtertype"></span>
<ol>
<li class="hideall out"><span>Hide All</span></li>
<li class="showall in"><span>Show all</span></li>
</ol>
<a href="http://docs.scala-lang.org/overviews/scaladoc/usage.html#members" target="_blank">Learn more about member selection</a>
</div>
<div id="visbl">
<span class="filtertype">Visibility</span>
<ol><li class="public in"><span>Public</span></li><li class="all out"><span>All</span></li></ol>
</div>
</div>
<div id="template">
<div id="allMembers">
<div id="constructors" class="members">
<h3>Instance Constructors</h3>
<ol><li name="org.scalatest.concurrent.Conductor#<init>" visbl="pub" data-isabs="false" fullComment="no" group="Ungrouped">
<a id="<init>():org.scalatest.concurrent.Conductor"></a>
<a id="<init>:Conductor"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier"></span>
<span class="kind">new</span>
</span>
<span class="symbol">
<span class="name">Conductor</span><span class="params">()</span>
</span>
</h4>
</li></ol>
</div>
<div id="values" class="values members">
<h3>Value Members</h3>
<ol><li name="scala.AnyRef#!=" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="!=(x$1:AnyRef):Boolean"></a>
<a id="!=(AnyRef):Boolean"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier">final </span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span title="gt4s: $bang$eq" class="name">!=</span><span class="params">(<span name="arg0">arg0: <span class="extype" name="scala.AnyRef">AnyRef</span></span>)</span><span class="result">: <span class="extype" name="scala.Boolean">Boolean</span></span>
</span>
</h4>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef</dd></dl></div>
</li><li name="scala.Any#!=" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="!=(x$1:Any):Boolean"></a>
<a id="!=(Any):Boolean"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier">final </span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span title="gt4s: $bang$eq" class="name">!=</span><span class="params">(<span name="arg0">arg0: <span class="extype" name="scala.Any">Any</span></span>)</span><span class="result">: <span class="extype" name="scala.Boolean">Boolean</span></span>
</span>
</h4>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>Any</dd></dl></div>
</li><li name="scala.AnyRef###" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="##():Int"></a>
<a id="##():Int"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier">final </span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span title="gt4s: $hash$hash" class="name">##</span><span class="params">()</span><span class="result">: <span class="extype" name="scala.Int">Int</span></span>
</span>
</h4>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef → Any</dd></dl></div>
</li><li name="scala.AnyRef#==" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="==(x$1:AnyRef):Boolean"></a>
<a id="==(AnyRef):Boolean"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier">final </span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span title="gt4s: $eq$eq" class="name">==</span><span class="params">(<span name="arg0">arg0: <span class="extype" name="scala.AnyRef">AnyRef</span></span>)</span><span class="result">: <span class="extype" name="scala.Boolean">Boolean</span></span>
</span>
</h4>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef</dd></dl></div>
</li><li name="scala.Any#==" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="==(x$1:Any):Boolean"></a>
<a id="==(Any):Boolean"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier">final </span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span title="gt4s: $eq$eq" class="name">==</span><span class="params">(<span name="arg0">arg0: <span class="extype" name="scala.Any">Any</span></span>)</span><span class="result">: <span class="extype" name="scala.Boolean">Boolean</span></span>
</span>
</h4>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>Any</dd></dl></div>
</li><li name="scala.Any#asInstanceOf" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="asInstanceOf[T0]:T0"></a>
<a id="asInstanceOf[T0]:T0"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier">final </span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">asInstanceOf</span><span class="tparams">[<span name="T0">T0</span>]</span><span class="result">: <span class="extype" name="scala.Any.asInstanceOf.T0">T0</span></span>
</span>
</h4>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>Any</dd></dl></div>
</li><li name="org.scalatest.concurrent.Conductor#beat" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="beat:Int"></a>
<a id="beat:Int"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier"></span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">beat</span><span class="result">: <span class="extype" name="scala.Int">Int</span></span>
</span>
</h4>
<p class="shortcomment cmt">The current value of the thread clock.</p><div class="fullcomment"><div class="comment cmt"><p>The current value of the thread clock.
</p></div><dl class="paramcmts block"><dt>returns</dt><dd class="cmt"><p>the current beat value
</p></dd></dl></div>
</li><li name="scala.AnyRef#clone" visbl="prt" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="clone():Object"></a>
<a id="clone():AnyRef"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier"></span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">clone</span><span class="params">()</span><span class="result">: <span class="extype" name="scala.AnyRef">AnyRef</span></span>
</span>
</h4>
<div class="fullcomment"><dl class="attributes block"> <dt>Attributes</dt><dd>protected[<a href="../../../java$lang.html" class="extype" name="java.lang">java.lang</a>] </dd><dt>Definition Classes</dt><dd>AnyRef</dd><dt>Annotations</dt><dd>
<span class="name">@throws</span><span class="args">()</span>
</dd></dl></div>
</li><li name="org.scalatest.concurrent.Conductor#conduct" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="conduct(clockPeriod:Int,timeout:Int):Unit"></a>
<a id="conduct(Int,Int):Unit"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier"></span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">conduct</span><span class="params">(<span name="clockPeriod">clockPeriod: <span class="extype" name="scala.Int">Int</span></span>, <span name="timeout">timeout: <span class="extype" name="scala.Int">Int</span></span>)</span><span class="result">: <span class="extype" name="scala.Unit">Unit</span></span>
</span>
</h4>
<p class="shortcomment cmt">Conducts a multithreaded test with the specified clock period (in milliseconds)
and timeout (in seconds).</p><div class="fullcomment"><div class="comment cmt"><p>Conducts a multithreaded test with the specified clock period (in milliseconds)
and timeout (in seconds).</p><p>A <code>Conductor</code> instance maintains an internal clock, which will wake up
periodically and check to see if it should advance the beat, abort the test, or go back to sleep.
It sleeps <code>clockPeriod</code> milliseconds each time. It will abort the test
if either deadlock is suspected or the beat has not advanced for the number of
seconds specified as <code>timeout</code>. Suspected deadlock will be declared if
for some number of consecutive clock cycles, all test threads are in the <code>BLOCKED</code> or
<code>WAITING</code> states and none of them are waiting for a beat.</p></div><dl class="paramcmts block"><dt class="param">clockPeriod</dt><dd class="cmt"><p>The period (in ms) the clock will sleep each time it sleeps</p></dd><dt class="param">timeout</dt><dd class="cmt"><p>The maximum allowed time between successive advances of the beat. If this time
is exceeded, the Conductor will abort the test.</p></dd></dl><dl class="attributes block"> <dt>Exceptions thrown</dt><dd><span class="cmt">Throwable<p>The first error or exception that is thrown by one of the test threads, or
a <code>TestFailedException</code> if the test was aborted due to a timeout or suspected deadlock.
</p></span></dd></dl></div>
</li><li name="org.scalatest.concurrent.Conductor#conduct" visbl="pub" data-isabs="false" fullComment="no" group="Ungrouped">
<a id="conduct():Unit"></a>
<a id="conduct():Unit"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier"></span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">conduct</span><span class="params">()</span><span class="result">: <span class="extype" name="scala.Unit">Unit</span></span>
</span>
</h4>
<p class="shortcomment cmt">Conducts a multithreaded test with a default clock period of 10 milliseconds
and default run limit of 5 seconds.</p>
</li><li name="org.scalatest.concurrent.Conductor#conductingHasBegun" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="conductingHasBegun:Boolean"></a>
<a id="conductingHasBegun:Boolean"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier"></span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">conductingHasBegun</span><span class="result">: <span class="extype" name="scala.Boolean">Boolean</span></span>
</span>
</h4>
<p class="shortcomment cmt">Indicates whether either of the two overloaded <code>conduct</code> methods
has been invoked.</p><p>This method returns true if either <code>conduct</code> method has been invoked. The
<code>conduct</code> method may have returned or not. (In other words, a <code>true</code>
result from this method does not mean the <code>conduct</code> method has returned,
just that it has already been invoked and, therefore, the multi-threaded scenario it
conducts has definitely begun.)</p></div></div>
</li><li name="scala.AnyRef#eq" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="eq(x$1:AnyRef):Boolean"></a>
<a id="eq(AnyRef):Boolean"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier">final </span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">eq</span><span class="params">(<span name="arg0">arg0: <span class="extype" name="scala.AnyRef">AnyRef</span></span>)</span><span class="result">: <span class="extype" name="scala.Boolean">Boolean</span></span>
</span>
</h4>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef</dd></dl></div>
</li><li name="scala.AnyRef#equals" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="equals(x$1:Any):Boolean"></a>
<a id="equals(Any):Boolean"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier"></span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">equals</span><span class="params">(<span name="arg0">arg0: <span class="extype" name="scala.Any">Any</span></span>)</span><span class="result">: <span class="extype" name="scala.Boolean">Boolean</span></span>
</span>
</h4>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef → Any</dd></dl></div>
</li><li name="scala.AnyRef#finalize" visbl="prt" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="finalize():Unit"></a>
<a id="finalize():Unit"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier"></span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">finalize</span><span class="params">()</span><span class="result">: <span class="extype" name="scala.Unit">Unit</span></span>
</span>
</h4>
<div class="fullcomment"><dl class="attributes block"> <dt>Attributes</dt><dd>protected[<a href="../../../java$lang.html" class="extype" name="java.lang">java.lang</a>] </dd><dt>Definition Classes</dt><dd>AnyRef</dd><dt>Annotations</dt><dd>
<span class="name">@throws</span><span class="args">()</span>
</dd></dl></div>
</li><li name="scala.AnyRef#getClass" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="getClass():Class[_]"></a>
<a id="getClass():Class[_]"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier">final </span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">getClass</span><span class="params">()</span><span class="result">: <span class="extype" name="java.lang.Class">Class</span>[_]</span>
</span>
</h4>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef → Any</dd></dl></div>
</li><li name="scala.AnyRef#hashCode" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="hashCode():Int"></a>
<a id="hashCode():Int"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier"></span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">hashCode</span><span class="params">()</span><span class="result">: <span class="extype" name="scala.Int">Int</span></span>
</span>
</h4>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef → Any</dd></dl></div>
</li><li name="org.scalatest.concurrent.Conductor#isConductorFrozen" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="isConductorFrozen:Boolean"></a>
<a id="isConductorFrozen:Boolean"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier"></span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">isConductorFrozen</span><span class="result">: <span class="extype" name="scala.Boolean">Boolean</span></span>
</span>
</h4>
<p class="shortcomment cmt">Indicates whether the conductor has been frozen.</p><div class="fullcomment"><div class="comment cmt"><p>Indicates whether the conductor has been frozen.</p><p>Note: The only way a thread
can freeze the conductor is by calling <code>withConductorFrozen</code>.</p></div></div>
</li><li name="scala.Any#isInstanceOf" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="isInstanceOf[T0]:Boolean"></a>
<a id="isInstanceOf[T0]:Boolean"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier">final </span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">isInstanceOf</span><span class="tparams">[<span name="T0">T0</span>]</span><span class="result">: <span class="extype" name="scala.Boolean">Boolean</span></span>
</span>
</h4>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>Any</dd></dl></div>
</li><li name="scala.AnyRef#ne" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="ne(x$1:AnyRef):Boolean"></a>
<a id="ne(AnyRef):Boolean"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier">final </span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">ne</span><span class="params">(<span name="arg0">arg0: <span class="extype" name="scala.AnyRef">AnyRef</span></span>)</span><span class="result">: <span class="extype" name="scala.Boolean">Boolean</span></span>
</span>
</h4>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef</dd></dl></div>
</li><li name="scala.AnyRef#notify" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="notify():Unit"></a>
<a id="notify():Unit"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier">final </span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">notify</span><span class="params">()</span><span class="result">: <span class="extype" name="scala.Unit">Unit</span></span>
</span>
</h4>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef</dd></dl></div>
</li><li name="scala.AnyRef#notifyAll" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="notifyAll():Unit"></a>
<a id="notifyAll():Unit"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier">final </span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">notifyAll</span><span class="params">()</span><span class="result">: <span class="extype" name="scala.Unit">Unit</span></span>
</span>
</h4>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef</dd></dl></div>
</li><li name="scala.AnyRef#synchronized" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="synchronized[T0](x$1:=>T0):T0"></a>
<a id="synchronized[T0](⇒T0):T0"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier">final </span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">synchronized</span><span class="tparams">[<span name="T0">T0</span>]</span><span class="params">(<span name="arg0">arg0: ⇒ <span class="extype" name="java.lang.AnyRef.synchronized.T0">T0</span></span>)</span><span class="result">: <span class="extype" name="java.lang.AnyRef.synchronized.T0">T0</span></span>
</span>
</h4>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef</dd></dl></div>
</li><li name="org.scalatest.concurrent.Conductor#thread" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="thread(name:String)(fun:=>Unit):Thread"></a>
<a id="thread(String)(⇒Unit):Thread"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier"></span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">thread</span><span class="params">(<span name="name">name: <span class="extype" name="scala.Predef.String">String</span></span>)</span><span class="params">(<span name="fun">fun: ⇒ <span class="extype" name="scala.Unit">Unit</span></span>)</span><span class="result">: <span class="extype" name="java.lang.Thread">Thread</span></span>
</span>
</h4>
<p class="shortcomment cmt">Creates a new thread with the specified name that will execute the specified function.</p><div class="fullcomment"><div class="comment cmt"><p>Creates a new thread with the specified name that will execute the specified function.</p><p>This method may be safely called by any thread.</p></div><dl class="paramcmts block"><dt class="param">name</dt><dd class="cmt"><p>the name of the newly created thread</p></dd><dt class="param">fun</dt><dd class="cmt"><p>the function to be executed by the newly created thread</p></dd><dt>returns</dt><dd class="cmt"><p>the newly created thread
</p></dd></dl></div>
</li><li name="org.scalatest.concurrent.Conductor#thread" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="thread(fun:=>Unit):Thread"></a>
<a id="thread(⇒Unit):Thread"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier"></span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">thread</span><span class="params">(<span name="fun">fun: ⇒ <span class="extype" name="scala.Unit">Unit</span></span>)</span><span class="result">: <span class="extype" name="java.lang.Thread">Thread</span></span>
</span>
</h4>
<p class="shortcomment cmt">Creates a new thread that will execute the specified function.</p><div class="fullcomment"><div class="comment cmt"><p>Creates a new thread that will execute the specified function.</p><p>The name of the thread will be of the form Conductor-Thread-N, where N is some integer.</p><p>This method may be safely called by any thread.</p></div><dl class="paramcmts block"><dt class="param">fun</dt><dd class="cmt"><p>the function to be executed by the newly created thread</p></dd><dt>returns</dt><dd class="cmt"><p>the newly created thread
</p></dd></dl></div>
</li><li name="scala.AnyRef#toString" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="toString():String"></a>
<a id="toString():String"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier"></span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">toString</span><span class="params">()</span><span class="result">: <span class="extype" name="java.lang.String">String</span></span>
</span>
</h4>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef → Any</dd></dl></div>
</li><li name="scala.AnyRef#wait" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="wait():Unit"></a>
<a id="wait():Unit"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier">final </span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">wait</span><span class="params">()</span><span class="result">: <span class="extype" name="scala.Unit">Unit</span></span>
</span>
</h4>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef</dd><dt>Annotations</dt><dd>
<span class="name">@throws</span><span class="args">()</span>
</dd></dl></div>
</li><li name="scala.AnyRef#wait" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="wait(x$1:Long,x$2:Int):Unit"></a>
<a id="wait(Long,Int):Unit"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier">final </span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">wait</span><span class="params">(<span name="arg0">arg0: <span class="extype" name="scala.Long">Long</span></span>, <span name="arg1">arg1: <span class="extype" name="scala.Int">Int</span></span>)</span><span class="result">: <span class="extype" name="scala.Unit">Unit</span></span>
</span>
</h4>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef</dd><dt>Annotations</dt><dd>
<span class="name">@throws</span><span class="args">()</span>
</dd></dl></div>
</li><li name="scala.AnyRef#wait" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="wait(x$1:Long):Unit"></a>
<a id="wait(Long):Unit"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier">final </span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">wait</span><span class="params">(<span name="arg0">arg0: <span class="extype" name="scala.Long">Long</span></span>)</span><span class="result">: <span class="extype" name="scala.Unit">Unit</span></span>
</span>
</h4>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef</dd><dt>Annotations</dt><dd>
<span class="name">@throws</span><span class="args">()</span>
</dd></dl></div>
</li><li name="org.scalatest.concurrent.Conductor#waitForBeat" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="waitForBeat(beat:Int):Unit"></a>
<a id="waitForBeat(Int):Unit"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier"></span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">waitForBeat</span><span class="params">(<span name="beat">beat: <span class="extype" name="scala.Int">Int</span></span>)</span><span class="result">: <span class="extype" name="scala.Unit">Unit</span></span>
</span>
</h4>
<p class="shortcomment cmt">Blocks the current thread until the thread beat reaches the
specified value, at which point the current thread will be unblocked.</p><div class="fullcomment"><div class="comment cmt"><p>Blocks the current thread until the thread beat reaches the
specified value, at which point the current thread will be unblocked.
</p></div><dl class="paramcmts block"><dt class="param">beat</dt><dd class="cmt"><p>the tick value to wait for</p></dd></dl><dl class="attributes block"> <dt>Exceptions thrown</dt><dd><span class="cmt">NotAllowedException<p>if the a <code>beat</code> less than or equal to zero is passed
</p></span></dd></dl></div>
</li><li name="org.scalatest.concurrent.Conductor#whenFinished" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="whenFinished(fun:=>Unit):Unit"></a>
<a id="whenFinished(⇒Unit):Unit"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier"></span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">whenFinished</span><span class="params">(<span name="fun">fun: ⇒ <span class="extype" name="scala.Unit">Unit</span></span>)</span><span class="result">: <span class="extype" name="scala.Unit">Unit</span></span>
</span>
</h4>
<p class="shortcomment cmt">Invokes <code>conduct</code> and after <code>conduct</code> method returns,
if <code>conduct</code> returns normally (<em>i.e.</em>, without throwing
an exception), invokes the passed function.</p><div class="fullcomment"><div class="comment cmt"><p>Invokes <code>conduct</code> and after <code>conduct</code> method returns,
if <code>conduct</code> returns normally (<em>i.e.</em>, without throwing
an exception), invokes the passed function.</p><p>If <code>conduct</code> completes abruptly with an exception, this method
will complete abruptly with the same exception and not execute the passed
function.</p><p>This method must be called by the thread that instantiated this <code>Conductor</code>,
and that same thread will invoke <code>conduct</code> and, if it returns normally, execute
the passed function.</p><p>Because <code>whenFinished</code> invokes <code>conduct</code>, it can only be invoked
once on a <code>Conductor</code> instance. As a result, if you need to pass a block of
code to <code>whenFinished</code> it should be the last statement of your test. If you
don't have a block of code that needs to be run once all the threads have finished
successfully, then you can simply invoke <code>conduct</code> and never invoke
<code>whenFinished</code>.</p></div><dl class="paramcmts block"><dt class="param">fun</dt><dd class="cmt"><p>the function to execute after <code>conduct</code> call returns</p></dd></dl><dl class="attributes block"> <dt>Exceptions thrown</dt><dd><span class="cmt">NotAllowedException<p>if the calling thread is not the thread that
instantiated this <code>Conductor</code>, or if <code>conduct</code> has already
been invoked on this conductor.
</p></span></dd></dl></div>
</li><li name="org.scalatest.concurrent.Conductor#withConductorFrozen" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="withConductorFrozen[T](fun:=>T):Unit"></a>
<a id="withConductorFrozen[T](⇒T):Unit"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier"></span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">withConductorFrozen</span><span class="tparams">[<span name="T">T</span>]</span><span class="params">(<span name="fun">fun: ⇒ <span class="extype" name="org.scalatest.concurrent.Conductor.withConductorFrozen.T">T</span></span>)</span><span class="result">: <span class="extype" name="scala.Unit">Unit</span></span>
</span>
</h4>
<p class="shortcomment cmt">Executes the passed function with the <code>Conductor</code> <em>frozen</em> so that it
won't advance the clock.</p><div class="fullcomment"><div class="comment cmt"><p>Executes the passed function with the <code>Conductor</code> <em>frozen</em> so that it
won't advance the clock.</p><p>While the <code>Conductor</code> is frozen, the beat will not advance. Once the
passed function has completed executing, the <code>Conductor</code> will be unfrozen
so that the beat will advance when all threads are blocked, as normal.</p></div><dl class="paramcmts block"><dt class="param">fun</dt><dd class="cmt"><p>the function to execute while the <code>Conductor</code> is frozen.
</p></dd></dl></div>
</li></ol>
</div>
</div>
<div id="inheritedMembers">
<div class="parent" name="scala.AnyRef">
<h3>Inherited from <span class="extype" name="scala.AnyRef">AnyRef</span></h3>
</div><div class="parent" name="scala.Any">
<h3>Inherited from <span class="extype" name="scala.Any">Any</span></h3>
</div>
</div>
<div id="groupedMembers">
<div class="group" name="Ungrouped">
<h3>Ungrouped</h3>
</div>
</div>
</div>
<div id="tooltip"></div>
<div id="footer"> </div>
</body>
</html> | scalatest/scalatest-website | public/scaladoc/2.1.7/org/scalatest/concurrent/Conductor.html | HTML | apache-2.0 | 51,925 |
package info.novatec.testit.webtester.support.assertj;
import static info.novatec.testit.webtester.support.assertj.WebTesterAssertions.assertThat;
import static org.mockito.Mockito.doReturn;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mock;
import org.mockito.runners.MockitoJUnitRunner;
import info.novatec.testit.webtester.pageobjects.RadioButton;
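/* Unit tests for the AssertJ-style RadioButton assertions (isSelected / isNotSelected). */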
@RunWith(MockitoJUnitRunner.class)
public class RadioButtonAssertTest {
@Mock
RadioButton selectedRadioButton;
@Mock
RadioButton radioButton;
@Before
public void setUp() {
doReturn(true).when(selectedRadioButton).isSelected();
}
/* selected */
@Test
public void selectedTrueTest() {
assertThat(selectedRadioButton).isSelected(true);
}
@Test(expected = AssertionError.class)
public void selectedFalseTest() {
assertThat(radioButton).isSelected(true);
}
@Test
public void notSelectedTrueTest() {
assertThat(radioButton).isNotSelected(true);
}
@Test(expected = AssertionError.class)
public void notSelectedFalseTest() {
assertThat(selectedRadioButton).isNotSelected(true);
}
}
| dbe-it/webtester-core | webtester-support-assertj/src/test/java/info/novatec/testit/webtester/support/assertj/RadioButtonAssertTest.java | Java | apache-2.0 | 1,213 |
## How to index PDF documents in a folder with PDF Extractor SDK in VB.NET using ByteScout Premium Suite
### This VB.NET tutorial shows how to index PDF documents in a folder with PDF Extractor SDK
Indexing PDF documents in a folder with PDF Extractor SDK is simple to implement in VB.NET using the source code below. ByteScout Premium Suite is the bundle of twelve ByteScout SDK products, including tools and components for PDF, barcodes, spreadsheets, and screen video recording, and you can use it to index the PDF documents in a folder from VB.NET.
Want to save time? You will save a lot of time on writing and testing code, as you may simply take the VB.NET code below and use it in your application. This VB.NET sample code is all you need for your app: just copy and paste the code, add references (if needed), and you are all set! It does the whole work of indexing the PDF documents in a folder for you.
You can download a free trial version of ByteScout Premium Suite from our website to see and try many other VB.NET source code samples.
## REQUEST FREE TECH SUPPORT
[Click here to get in touch](https://bytescout.zendesk.com/hc/en-us/requests/new?subject=ByteScout%20Premium%20Suite%20Question)
or just send email to [[email protected]](mailto:[email protected]?subject=ByteScout%20Premium%20Suite%20Question)
## ON-PREMISE OFFLINE SDK
[Get Your 60 Day Free Trial](https://bytescout.com/download/web-installer?utm_source=github-readme)
[Explore SDK Docs](https://bytescout.com/documentation/index.html?utm_source=github-readme)
[Sign Up For Online Training](https://academy.bytescout.com/)
## ON-DEMAND REST WEB API
[Get your API key](https://pdf.co/documentation/api?utm_source=github-readme)
[Explore Web API Documentation](https://pdf.co/documentation/api?utm_source=github-readme)
[Explore Web API Samples](https://github.com/bytescout/ByteScout-SDK-SourceCode/tree/master/PDF.co%20Web%20API)
## VIDEO REVIEW
[https://www.youtube.com/watch?v=NEwNs2b9YN8](https://www.youtube.com/watch?v=NEwNs2b9YN8)
<!-- code block begin -->
##### ****IndexDocsInFolder.vbproj:**
```
<?xml version="1.0" encoding="utf-8"?>
<Project ToolsVersion="15.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<Import Project="$(MSBuildExtensionsPath)\$(MSBuildToolsVersion)\Microsoft.Common.props" Condition="Exists('$(MSBuildExtensionsPath)\$(MSBuildToolsVersion)\Microsoft.Common.props')" />
<PropertyGroup>
<Configuration Condition=" '$(Configuration)' == '' ">Debug</Configuration>
<Platform Condition=" '$(Platform)' == '' ">AnyCPU</Platform>
<ProjectGuid>{846F275E-BE99-4254-85ED-B8CBBB4546A9}</ProjectGuid>
<OutputType>Exe</OutputType>
<StartupObject>IndexDocsInFolder.Program</StartupObject>
<RootNamespace>IndexDocsInFolder</RootNamespace>
<AssemblyName>IndexDocsInFolder</AssemblyName>
<FileAlignment>512</FileAlignment>
<MyType>Console</MyType>
<TargetFrameworkVersion>v2.0</TargetFrameworkVersion>
</PropertyGroup>
<PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Debug|AnyCPU' ">
<PlatformTarget>AnyCPU</PlatformTarget>
<DebugSymbols>true</DebugSymbols>
<DebugType>full</DebugType>
<DefineDebug>true</DefineDebug>
<DefineTrace>true</DefineTrace>
<OutputPath>bin\Debug\</OutputPath>
<DocumentationFile>IndexDocsInFolder.xml</DocumentationFile>
<NoWarn>42016,41999,42017,42018,42019,42032,42036,42020,42021,42022</NoWarn>
</PropertyGroup>
<PropertyGroup Condition=" '$(Configuration)|$(Platform)' == 'Release|AnyCPU' ">
<PlatformTarget>AnyCPU</PlatformTarget>
<DebugType>pdbonly</DebugType>
<DefineDebug>false</DefineDebug>
<DefineTrace>true</DefineTrace>
<Optimize>true</Optimize>
<OutputPath>bin\Release\</OutputPath>
<DocumentationFile>IndexDocsInFolder.xml</DocumentationFile>
<NoWarn>42016,41999,42017,42018,42019,42032,42036,42020,42021,42022</NoWarn>
</PropertyGroup>
<PropertyGroup>
<OptionExplicit>On</OptionExplicit>
</PropertyGroup>
<PropertyGroup>
<OptionCompare>Binary</OptionCompare>
</PropertyGroup>
<PropertyGroup>
<OptionStrict>Off</OptionStrict>
</PropertyGroup>
<PropertyGroup>
<OptionInfer>On</OptionInfer>
</PropertyGroup>
<ItemGroup>
<Reference Include="Bytescout.PDFExtractor, Version=9.1.0.3170, Culture=neutral, PublicKeyToken=f7dd1bd9d40a50eb, processorArchitecture=MSIL">
<SpecificVersion>False</SpecificVersion>
<HintPath>..\..\..\..\..\..\..\..\..\..\Program Files\Bytescout PDF Extractor SDK\net2.00\Bytescout.PDFExtractor.dll</HintPath>
</Reference>
<Reference Include="System" />
<Reference Include="System.Data" />
<Reference Include="System.Deployment" />
<Reference Include="System.Drawing" />
<Reference Include="System.Xml" />
</ItemGroup>
<ItemGroup>
<Import Include="Microsoft.VisualBasic" />
<Import Include="System" />
<Import Include="System.Collections" />
<Import Include="System.Collections.Generic" />
<Import Include="System.Data" />
<Import Include="System.Diagnostics" />
</ItemGroup>
<ItemGroup>
<Compile Include="Program.vb" />
</ItemGroup>
<ItemGroup>
<Content Include="Files\ImageSample.png">
<CopyToOutputDirectory>Always</CopyToOutputDirectory>
</Content>
</ItemGroup>
<ItemGroup>
<Content Include="Files\SampleFile1.pdf">
<CopyToOutputDirectory>Always</CopyToOutputDirectory>
</Content>
<Content Include="Files\SampleFile2.pdf">
<CopyToOutputDirectory>Always</CopyToOutputDirectory>
</Content>
</ItemGroup>
<Import Project="$(MSBuildToolsPath)\Microsoft.VisualBasic.targets" />
</Project>
```
<!-- code block end -->
<!-- code block begin -->
##### ****Program.vb:**
```
Imports System.IO
Imports Bytescout.PDFExtractor
Module Program
Sub Main()
Try
' Output file list
Dim lstAllFilesInfo = New List(Of FileIndexOutput)()
' Get all files inside directory
Dim allFiles = Directory.GetFiles(".\Files", "*.*")
' Iterate all files, and get details
For Each itmFile In allFiles
' Get basic file information
Dim fileInfo As FileInfo = New FileInfo(itmFile)
' Check whether file is supported
If _IsFileSupported(fileInfo) Then
' Fill file index model
Dim oFileIndex = New FileIndexOutput()
oFileIndex.fileName = fileInfo.Name
oFileIndex.fileDate = fileInfo.CreationTime
oFileIndex.content = _GetFileContent(fileInfo)
' Add to final list
lstAllFilesInfo.Add(oFileIndex)
End If
Next
' Print all output
Console.WriteLine("Total {0} files indexed" & vbLf, lstAllFilesInfo.Count)
For Each itmFileInfo In lstAllFilesInfo
Console.WriteLine("fileName: {0}", itmFileInfo.fileName)
Console.WriteLine("fileDate: {0}", itmFileInfo.fileDate.ToString("MMM dd yyyy hh:mm:ss"))
Console.WriteLine("content: {0}", itmFileInfo.content.Trim())
Console.WriteLine(vbLf)
Next
Catch ex As Exception
Console.WriteLine(("ERROR:" + ex.Message))
End Try
Console.WriteLine("Press any key to exit...")
Console.ReadLine()
End Sub
''' <summary>
''' Get File COntent
''' </summary>
Private Function _GetFileContent(ByVal fileInfo As FileInfo) As String
Dim fileExtension As String = System.IO.Path.GetExtension(fileInfo.FullName)
If fileExtension = ".pdf" Then
Return _GetPdfFileContent(fileInfo)
ElseIf fileExtension = ".png" OrElse fileExtension = ".jpg" Then
Return _GetImageContet(fileInfo)
End If
Throw New Exception("File not supported.")
End Function
''' <summary>
''' Get PDF File Content
''' </summary>
Private Function _GetPdfFileContent(ByVal fileInfo As FileInfo) As String
' Read all file content...
Using textExtractor As TextExtractor = New TextExtractor("demo", "demo")
' Load Document
textExtractor.LoadDocumentFromFile(fileInfo.FullName)
Return textExtractor.GetText()
End Using
End Function
''' <summary>
''' Get Image Contents
''' </summary>
Private Function _GetImageContet(ByVal fileInfo As FileInfo) As String
' Read all file content...
Using extractor As TextExtractor = New TextExtractor()
' Load document
extractor.LoadDocumentFromFile(fileInfo.FullName)
' Set option to repair text
extractor.OCRMode = OCRMode.TextFromImagesAndVectorsAndRepairedFonts
' Enable Optical Character Recognition (OCR)
' in .Auto mode (SDK automatically checks if needs to use OCR or not)
extractor.OCRMode = OCRMode.Auto
' Set the location of OCR language data files
extractor.OCRLanguageDataFolder = "c:\Program Files\Bytescout PDF Extractor SDK\ocrdata_best\"
' Set OCR language
extractor.OCRLanguage = "eng" '"eng" for english, "deu" for German, "fra" for French, "spa" for Spanish etc - according to files in "ocrdata" folder
' Find more language files at https://github.com/bytescout/ocrdata
' Set PDF document rendering resolution
extractor.OCRResolution = 300
' Read all text
Return extractor.GetText()
End Using
End Function
''' <summary>
''' Check whether file is valid
''' </summary>
Private Function _IsFileSupported(ByVal fileInfo As FileInfo) As Boolean
' Get File Extension
Dim fileExtension As String = Path.GetExtension(fileInfo.Name)
' Check whether file extension is valid
Return (fileExtension = ".pdf" OrElse fileExtension = ".png" OrElse fileExtension = ".jpg")
End Function
''' <summary>
''' FileIndexOutput class
''' </summary>
Public Class FileIndexOutput
Public Property fileName As String
Public Property fileDate As DateTime
Public Property content As String
End Class
End Module
```
<!-- code block end --> | bytescout/ByteScout-SDK-SourceCode | Premium Suite/VB.NET/Index pdf documents in folder with pdf extractor sdk/README.md | Markdown | apache-2.0 | 10,621 |
package com.jt.test.sort;
import java.util.Arrays;
import java.util.Random;
/**
* since 2016/10/19.
*/
public class Select {
public static void sort(Comparable[] data) {
for (int i = 0; i < data.length; i++) {
int min = i;
for (int j = i+1; j < data.length; j++) {
if (less(data, min, j)) {
min = j;
}
}
exch(data, i, min);
}
}
private static boolean less(Comparable[] data, int min, int j) {
return data[min].compareTo(data[j]) > 0;
}
private static void exch(Comparable[] data, int i, int min) {
Comparable tmp = data[i];
data[i] = data[min];
data[min] = tmp;
}
public static boolean isSort(Comparable[] data) {
for (int i = 0; i < data.length-1; i++) {
if (less(data, i, i + 1)) {
return false;
}
}
return true;
}
public static void main(String[] args) throws Exception {
Random random = new Random();
Integer[] datas = new Integer[10];
for (int i = 0; i < 10; i++) {
datas[i] = random.nextInt(100);
}
sort(datas);
if (!isSort(datas)) {
System.err.println("not sort");
}
System.out.println(Arrays.toString(datas));
}
}
| jt120/algorithm | new-man/src/test/java/com/jt/test/sort/Select.java | Java | apache-2.0 | 1,378 |
/*
* Copyright 2000-2008 JetBrains s.r.o.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.jetbrains.plugins.groovy.lang.actions.updown;
import com.intellij.ide.DataManager;
import com.intellij.openapi.application.Result;
import com.intellij.openapi.command.WriteCommandAction;
import com.intellij.openapi.editor.Editor;
import com.intellij.openapi.editor.actionSystem.EditorActionHandler;
import com.intellij.openapi.editor.actionSystem.EditorActionManager;
import com.intellij.openapi.editor.ex.DocumentEx;
import com.intellij.testFramework.fixtures.LightCodeInsightFixtureTestCase;
import org.jetbrains.plugins.groovy.GroovyFileType;
import org.jetbrains.plugins.groovy.util.TestUtils;
import org.jetbrains.plugins.groovy.lang.editor.actions.GroovyEditorActionsManager;
import java.util.List;
/**
* @author ilyas
*/
public class GroovyMoveStatementTest extends LightCodeInsightFixtureTestCase {
@Override
protected String getBasePath() {
return TestUtils.getTestDataPath() + "groovy/actions/moveStatement/";
}
public void testClazz1() throws Throwable { downTest(); }
public void testClazz2() throws Throwable { upTest(); }
public void testClos2() throws Throwable { upTest(); }
public void testMeth1() throws Throwable { downTest(); }
public void testMeth2() throws Throwable { downTest(); }
public void testMeth3() throws Throwable { upTest(); }
public void testMeth4() throws Throwable { upTest(); }
public void testIfst() throws Throwable { downTest(); }
public void testIfst2() throws Throwable { upTest(); }
public void testSimple1() throws Throwable { downTest(); }
public void testSimple2() throws Throwable { upTest(); }
public void testTryst1() throws Throwable { downTest(); }
public void testTryst2() throws Throwable { downTest(); }
public void testStatementOutsideClosure() throws Throwable { downTest(); }
public void testVariableOutsideClosure() throws Throwable { upTest(); }
public void testVariableOutsideClosureDown() throws Throwable { downTest(); }
public void testStatementInsideClosure() throws Throwable { upTest(); }
public void testMoveGroovydocWithMethod() throws Throwable { downTest(); }
public void testMoveMethodWithGroovydoc() throws Throwable { downTest(); }
public void testMoveSecondFieldUp() throws Throwable { upTest(); }
public void testMoveFirstFieldDown() throws Throwable { downTest(); }
public void testVariableOverMethodInScript() throws Throwable { downTest(); }
public void testVariableOverClassInScript() throws Throwable { downTest(); }
public void testUpFromLastOffset() throws Throwable { upTest(); }
public void testClosureWithPrequel() throws Throwable { upTest(); }
public void testMultiLineVariable() throws Throwable { downTest(); }
public void testClosureVariableByRBrace() throws Throwable { upTest(); }
private void downTest() throws Exception {
doTest(GroovyEditorActionsManager.MOVE_STATEMENT_DOWN_ACTION);
}
private void upTest() throws Exception {
doTest(GroovyEditorActionsManager.MOVE_STATEMENT_UP_ACTION);
}
public void doTest(final String actionId) throws Exception {
final List<String> data = TestUtils.readInput(getTestDataPath() + getTestName(true) + ".test");
myFixture.configureByText(GroovyFileType.GROOVY_FILE_TYPE, data.get(0));
final EditorActionHandler handler = EditorActionManager.getInstance().getActionHandler(actionId);
new WriteCommandAction(getProject()) {
protected void run(Result result) throws Throwable {
final Editor editor = myFixture.getEditor();
handler.execute(editor, DataManager.getInstance().getDataContext(editor.getContentComponent()));
((DocumentEx)editor.getDocument()).stripTrailingSpaces(false);
}
}.execute();
myFixture.checkResult(data.get(1));
}
}
| jexp/idea2 | plugins/groovy/test/org/jetbrains/plugins/groovy/lang/actions/updown/GroovyMoveStatementTest.java | Java | apache-2.0 | 4,349 |
package org.apereo.cas.web.report;
import org.apereo.cas.web.report.util.ControllerUtils;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.actuate.endpoint.mvc.AbstractNamedMvcEndpoint;
import org.springframework.cloud.bus.BusProperties;
import org.springframework.cloud.config.server.config.ConfigServerProperties;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.servlet.ModelAndView;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import java.util.HashMap;
import java.util.Map;
/**
* Controller that exposes the CAS internal state and beans
* as JSON. The report is available at {@code /status/config}.
*
* @author Misagh Moayyed
* @since 4.1
*/
public class ConfigurationStateController extends AbstractNamedMvcEndpoint {
private static final String VIEW_CONFIG = "monitoring/viewConfig";
@Autowired(required = false)
private BusProperties busProperties;
@Autowired
private ConfigServerProperties configServerProperties;
public ConfigurationStateController() {
super("configstate", "/config", true, true);
}
/**
* Handle request.
*
* @param request the request
* @param response the response
* @return the model and view
* @throws Exception the exception
*/
@GetMapping
protected ModelAndView handleRequestInternal(final HttpServletRequest request,
final HttpServletResponse response) throws Exception {
final Map<String, Object> model = new HashMap<>();
final String path = request.getContextPath();
ControllerUtils.configureModelMapForConfigServerCloudBusEndpoints(busProperties, configServerProperties, path, model);
return new ModelAndView(VIEW_CONFIG, model);
}
}
| gabedwrds/cas | support/cas-server-support-reports/src/main/java/org/apereo/cas/web/report/ConfigurationStateController.java | Java | apache-2.0 | 1,890 |
/**
* Copyright (C) 2009 bdferris <[email protected]>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#import "OBAApplicationDelegate.h"
NS_ASSUME_NONNULL_BEGIN
@interface OBANetworkErrorAlertViewDelegate : NSObject <UIAlertViewDelegate> {
OBAApplicationDelegate * _context;
}
- (id) initWithContext:(OBAApplicationDelegate*)context;
@end
NS_ASSUME_NONNULL_END | tomtclai/onebusaway-iphone | controls/alerts/OBANetworkErrorAlertViewDelegate.h | C | apache-2.0 | 894 |
package com.canoo.ant.table;
import com.canoo.ant.filter.AllEqualsFilter;
import com.canoo.ant.filter.AllFilter;
import com.canoo.ant.filter.ITableFilter;
import org.apache.log4j.Logger;
import java.io.File;
import java.io.IOException;
import java.util.*;
public abstract class APropertyTable implements IPropertyTable {
private static final Logger LOG = Logger.getLogger(APropertyTable.class);
private static final int MAX_DEPTH = 10; // max recursion depth
private static final ThreadLocal DEPTH = new ThreadLocal();
private File fContainer;
private String fTable;
private String fPrefix;
private ITableFilter fFilter;
private List fRawTable;
private List fMetaTable;
protected static final String EMPTY = "";
protected static final String KEY_JOIN = "JOIN";
protected APropertyTable() {
fFilter = new AllFilter();
if( DEPTH.get() == null ) {
setDepth(0);
}
}
private static void setDepth(int depth){
DEPTH.set(new Integer(depth));
}
private static int getDepth(){
return((Integer)DEPTH.get()).intValue();
}
/**
* @return columnName -> expander (Type IPropertyTable)
*/
public Map getColumnInfo() {
List meta = getMetaTable();
Map result = new HashMap(meta.size()); // smaller is likely
// find all properties for this table
List tableSpecificColumnInfo = new AllEqualsFilter(TableFactory.KEY_TABLE).filter(meta, getTable());
for (Iterator eachColumnInfo = tableSpecificColumnInfo.iterator(); eachColumnInfo.hasNext();) {
Properties colInfo = (Properties) eachColumnInfo.next();
try {
// tableClass defaults to the current class
IPropertyTable table = TableFactory.createTable(colInfo, getClass().getName());
ITableFilter filter = TableFactory.createFilter(colInfo);
final File container;
if (colInfo.getProperty(TableFactory.KEY_CONTAINER, "").length() > 0) {
container = new File(getContainer().getParentFile(), colInfo.getProperty(TableFactory.KEY_CONTAINER));
colInfo.remove(TableFactory.KEY_CONTAINER); // to be sure that it doesn't get used with wrong path
}
else {
container = getContainer();
}
String key = colInfo.getProperty(TableFactory.KEY_NAME); // no default possible
TableFactory.initOrDefault(table, filter, colInfo, container, key);
result.put(key, table);
} catch (Exception e) {
LOG.error("cannot work with Property: " + colInfo.toString(), e);
throw new RuntimeException("Cannot work with Property: " + colInfo.toString(), e);
}
}
return result;
}
public List getPropertiesList(final String filterValue, final String prefix) {
// start with copy of initial table
// if current filter concerns extension keys, filter before extending
// filtering in advance also lowers memory consumption in the average
List result = getFilter().filter(getRawTable(), filterValue);
if (getDepth() > MAX_DEPTH){
LOG.error("processing grounded due to excessive recursion calls: "+getDepth());
return result;
}
setDepth(getDepth()+1);
final Map colInfo = getColumnInfo();
// only go over entries in the colInfo.
// (property names without colInfo info are not expanded)
for (Iterator eachExpandable = colInfo.keySet().iterator(); eachExpandable.hasNext();) {
String expansionName = (String) eachExpandable.next();
expandName(result, expansionName, colInfo);
}
setDepth(getDepth()-1);
// filter a second time to allow filters to work on expansions
result = getFilter().filter(result, filterValue);
// prefix is processed after filtering
if (prefix!=null && prefix.length()>0){
result = mapPrefix(result, prefix);
}
return result;
}
// like a ruby map!
private List mapPrefix(List result, final String prefix) {
List collect = new ArrayList(result.size());
for (Iterator eachProps = result.iterator(); eachProps.hasNext();) {
Properties props = (Properties) eachProps.next();
Properties mapped = new Properties();
for (Iterator eachKey = props.keySet().iterator(); eachKey.hasNext();) {
String key = (String) eachKey.next();
String value = props.getProperty(key);
mapped.setProperty(prefix+"."+key, value);
}
collect.add(mapped);
}
return collect;
}
protected void expandName(List result, String expansionName, Map colInfo) {
List expansions = new LinkedList(); // cannot add while iterating. store and add later
for (Iterator eachProperties = result.iterator(); eachProperties.hasNext();) {
Properties props = (Properties) eachProperties.next();
List newExpansions = expandProps(props, expansionName, colInfo);
// default behaviour: like OUTER join, we do not shrink if nothing found
if (newExpansions.size() > 0) {
eachProperties.remove();
expansions.addAll(newExpansions);
}
}
result.addAll(expansions);
}
protected List expandProps(Properties props, String expansionName, Map colInfo) {
String value = props.getProperty(expansionName);
List propExpansions = new LinkedList();
IPropertyTable expansionTable = (IPropertyTable) colInfo.get(expansionName);
// recursive call
List expandWith = expansionTable.getPropertiesList(value, expansionTable.getPrefix());
for (Iterator eachExpansion = expandWith.iterator(); eachExpansion.hasNext();) {
Properties expandProps = (Properties) eachExpansion.next();
// merge expansion with current line
expandProps.putAll(props);
// store for later adding
propExpansions.add(expandProps);
}
return propExpansions;
}
//-------------- field accessors ------------------
public File getContainer() {
return fContainer;
}
public void setContainer(File container) {
fContainer = container;
}
public String getTable() {
return fTable;
}
public void setTable(String table) {
fTable = table;
}
public ITableFilter getFilter() {
return fFilter;
}
public void setFilter(ITableFilter filter) {
fFilter = filter;
}
public String getPrefix() {
return fPrefix;
}
public void setPrefix(String prefix) {
fPrefix = prefix;
}
//-------------- how to read specifics ------------------
/** lazy getter, cached */
public List getRawTable() {
fRawTable = getCachedTable(getTable(), fRawTable);
return fRawTable;
}
/** lazy getter, cached */
public List getMetaTable() {
if (hasJoinTable()) {
fMetaTable = getCachedTable(KEY_JOIN, fMetaTable);
}
else {
fMetaTable = Collections.EMPTY_LIST;
}
return fMetaTable;
}
/**
* Indicates if the table container has a JOIN table.
* @return default is <code>true</code>
*/
protected boolean hasJoinTable() {
return true;
}
protected List getCachedTable(final String table, List tableCache) {
if (tableCache != null) {
return tableCache;
}
try {
tableCache = read(table);
}
catch (final IOException e) {
LOG.error("Cannot read " + getContainer() + " " + table, e);
String message = "Cannot read container >" + getContainer() + "<";
if (table != null)
message += " (table " + table + ")";
message += ": " + e.getMessage();
throw new RuntimeException(message, e);
}
if (tableCache.isEmpty()) {
LOG.debug("no entry in " + getContainer() + "/" + table);
}
LOG.debug(tableCache.size()+" entries in "+getContainer()+ " " + table);
return tableCache;
}
protected abstract List read(String table) throws IOException;
}
| lukecampbell/webtest | src/main/java/com/canoo/ant/table/APropertyTable.java | Java | apache-2.0 | 8,495 |
// For conditions of distribution and use, see copyright notice in LICENSE
#include "StableHeaders.h"
#include "ZipAssetBundle.h"
#include "ZipHelpers.h"
#include "ZipWorker.h"
#include "CoreDefines.h"
#include "Framework.h"
#include "FrameAPI.h"
#include "AssetAPI.h"
#include "AssetCache.h"
#include "LoggingFunctions.h"
#include <Urho3D/IO/FileSystem.h>
#include <zzip/zzip.h>
namespace Tundra
{
ZipAssetBundle::ZipAssetBundle(AssetAPI *owner, const String &type, const String &name) :
IAssetBundle(owner, type, name),
worker_(0),
archive_(0),
fileCount_(-1),
done_(false),
success_(false)
{
}
ZipAssetBundle::~ZipAssetBundle()
{
Unload();
}
void ZipAssetBundle::DoUnload()
{
Close();
StopThread();
fileCount_ = -1;
}
void ZipAssetBundle::Close()
{
if (archive_)
{
zzip_dir_close(archive_);
archive_ = 0;
}
}
bool ZipAssetBundle::DeserializeFromDiskSource()
{
if (!assetAPI_->Cache())
{
LogError("ZipAssetBundle::DeserializeFromDiskSource: Cannot process archive, AssetAPI cache is null.");
return false;
}
else if (DiskSource().Empty())
{
LogError("ZipAssetBundle::DeserializeFromDiskSource: Cannot process archive, no disk source for " + Name());
return false;
}
/* We want to detect if the extracted files are already up to date to save time.
If the last modified date for the sub asset is the same as the parent zip file,
we don't extract it. If the zip is re-downloaded from source everything will get unpacked even
if only one file would have changed inside it. We could do uncompressed size comparisons
but that is not a absolute guarantee that the file has not changed. We'll be on the safe side
to unpack the whole zip file. Zip files are meant for deploying the scene and should be touched
rather rarely. Note that local:// refs are unpacked to cache but the zips disk source is not in the
cache. Meaning that local:// zip files will always be extracted fully even if the disk source
was not changed, we don't have a mechanism to get the last modified date properly except from
the asset cache. For local scenes this should be fine as there is no real need to
zip the scene up as you already have the disk sources right there in the storage.
The last modified query will fail if the file is open with zziplib, do it first. */
uint zipLastModified = assetAPI_->Cache()->LastModified(Name());
const String diskSourceInternal = Urho3D::GetInternalPath(DiskSource());
zzip_error_t error = ZZIP_NO_ERROR;
archive_ = zzip_dir_open(diskSourceInternal.CString(), &error);
if (CheckAndLogZzipError(error) || CheckAndLogArchiveError(archive_) || !archive_)
{
archive_ = 0;
return false;
}
int uncompressing = 0;
ZZIP_DIRENT archiveEntry;
while(zzip_dir_read(archive_, &archiveEntry))
{
String relativePath = Urho3D::GetInternalPath(archiveEntry.d_name);
if (!relativePath.EndsWith("/"))
{
String subAssetRef = GetFullAssetReference(relativePath);
ZipArchiveFile file;
file.relativePath = relativePath;
file.cachePath = Urho3D::GetInternalPath(assetAPI_->Cache()->DiskSourceByRef(subAssetRef));
file.lastModified = assetAPI_->Cache()->LastModified(subAssetRef);
file.compressedSize = archiveEntry.d_csize;
file.uncompressedSize = archiveEntry.st_size;
/* Mark this file for extraction. If both cache files have valid dates
and they differ extract. If they have the same date stamp skip extraction.
Note that file.lastModified will be non-valid for non cached files so we
will cover also missing files. */
file.doExtract = (zipLastModified > 0 && file.lastModified > 0) ? (zipLastModified != file.lastModified) : true;
if (file.doExtract)
uncompressing++;
files_.Push(file);
fileCount_++;
}
}
// Close the zzip directory ptr
Close();
// If the zip file was empty we don't want IsLoaded to fail on the files_ check.
// The bundle loaded fine but there was no content, log a warning.
if (files_.Empty())
{
LogWarning("ZipAssetBundle: Bundle loaded but does not contain any files " + Name());
files_.Push(ZipArchiveFile());
Loaded.Emit(this);
return true;
}
// Don't spin the worker if all sub assets are up to date in cache.
if (uncompressing > 0)
{
// Now that the file info has been read, continue in a worker thread.
LogDebug("ZipAssetBundle: File information read for " + Name() + ". File count: " + String(files_.Size()) + ". Starting worker thread to uncompress " + String(uncompressing) + " files.");
// ZipWorker is a QRunnable we can pass to QThreadPool, it will handle scheduling it and deletes it when done.
worker_ = new ZipWorker(this, zipLastModified, diskSourceInternal, files_);
if (!worker_->Run())
{
LogError("ZipAssetBundle: Failed to start worker thread for " + Name());
files_.Clear();
return false;
}
assetAPI_->GetFramework()->Frame()->Updated.Connect(this, &ZipAssetBundle::CheckDone);
}
else
Loaded.Emit(this);
return true;
}
bool ZipAssetBundle::DeserializeFromData(const u8* /*data*/, uint /*numBytes*/)
{
/** @note At this point it seems zzip needs a disk source to do processing
so we require disk source for the archive. This might change in the future by changing the lib. */
return false;
}
Vector<u8> ZipAssetBundle::GetSubAssetData(const String &subAssetName)
{
/* Makes no sense to keep the whole zip file contents in memory as only
few files could be wanted from a 100mb bundle. Additionally all asset would take 2x the memory.
We could make this function also open the zip file and uncompress the data for every sub asset request.
But that would be rather pointless, not to mention slower, as we already have the unpacked individual
assets on disk. If the unpacking to disk changes we might need to rethink this. */
String filePath = GetSubAssetDiskSource(subAssetName);
if (filePath.Empty())
return Vector<u8>();
Vector<u8> data;
return LoadFileToVector(filePath, data) ? data : Vector<u8>();
}
String ZipAssetBundle::GetSubAssetDiskSource(const String &subAssetName)
{
return assetAPI_->Cache()->FindInCache(GetFullAssetReference(subAssetName));
}
String ZipAssetBundle::GetFullAssetReference(const String &subAssetName)
{
return Name() + "#" + subAssetName;
}
bool ZipAssetBundle::IsLoaded() const
{
return (archive_ != 0 || !files_.Empty());
}
void ZipAssetBundle::CheckDone(float /*frametime*/)
{
// Invoked in main thread context
{
Urho3D::MutexLock m(mutexDone_);
if (!done_)
return;
if (success_)
Loaded.Emit(this);
else
Failed.Emit(this);
}
StopThread();
assetAPI_->GetFramework()->Frame()->Updated.Disconnect(this, &ZipAssetBundle::CheckDone);
}
void ZipAssetBundle::WorkerDone(bool successful)
{
// Invoked in worker thread context
Urho3D::MutexLock m(mutexDone_);
done_ = true;
success_ = successful;
}
void ZipAssetBundle::StopThread()
{
if (worker_)
worker_->Stop();
SAFE_DELETE(worker_);
}
Urho3D::Context *ZipAssetBundle::Context() const
{
return assetAPI_->GetContext();
}
Urho3D::FileSystem *ZipAssetBundle::FileSystem() const
{
return assetAPI_->GetSubsystem<Urho3D::FileSystem>();
}
}
| realXtend/tundra-urho3d | src/Plugins/ZipPlugin/ZipAssetBundle.cpp | C++ | apache-2.0 | 7,865 |
import { Injectable } from "@angular/core";
import { InjectionFactory } from "../../L0/L0.injection-factory/injection-factory";
import { createSelector } from "../../L4/L4.ngrx/create-selector";
import { StatementsSelector } from "./statements.selector";
import { ExportDeclaration, SyntaxKind } from "typescript";
@Injectable()
export class ExportDeclarationsSelector implements InjectionFactory {
constructor(private readonly statementsSelector: StatementsSelector) {
return this.factory() as any;
}
factory() {
return createSelector(
this.statementsSelector,
statements => statements
.filter(({kind}) => kind === SyntaxKind.ExportDeclaration)
.map(item => item as ExportDeclaration)
);
}
}
| dvabuzyarov/moq.ts | projects/schematics/src/L2/L2.selectors/export-declarations.selector.ts | TypeScript | apache-2.0 | 791 |
using System;
using System.Collections.Generic;
using System.Configuration;
using System.Linq;
using System.Net;
using System.Net.Mail;
using System.Web;
namespace FashionStones.Utils
{
public class EmailSettings
{
public string Link = "www.fashion-stones.com.ua";
public string MailFromAddress = "[email protected]";
public string ServerName = "smtp.gmail.com";
public bool UseSsl = true;
public int ServerPort = 587; //465;
public string password = "8425999kapitoshka";
}
//public class GMailer
//{
// public static string GmailUsername { get { return "[email protected]"; } }
// public static string GmailPassword { get {return "8425999kapitoshka";} }
// public static int GmailPort { get; set; }
// public static bool GmailSSL { get; set; }
// public string ToEmail { get; set; }
// public string Subject { get; set; }
// public string Body { get; set; }
// public bool IsHtml { get; set; }
// static GMailer()
// {
// GmailHost = "smtp.gmail.com";
// GmailPort = 587; // Gmail can use ports 25, 465 & 587; but must be 25 for medium trust environment.
// GmailSSL = true;
// }
//public void Send()
//{
// SmtpClient smtp = new SmtpClient();
// smtp.Host = GmailHost;
// smtp.Port = GmailPort;
// smtp.EnableSsl = GmailSSL;
// smtp.DeliveryMethod = SmtpDeliveryMethod.Network;
// smtp.UseDefaultCredentials = false;
// smtp.Credentials = new NetworkCredential(GmailUsername, GmailPassword);
// using (var message = new MailMessage(GmailUsername, ToEmail))
// {
// message.Subject = Subject;
// message.Body = Body;
// message.IsBodyHtml = IsHtml;
// smtp.Send(message);
// }
//}
// }
} | dimakaminskiy/FashionStones | FashionStones/Utils/EmailSettings.cs | C# | apache-2.0 | 1,994 |
/*
* Copyright 2019 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.cloud.spanner;
import static com.google.cloud.spanner.SpannerExceptionFactory.newSpannerException;
import static com.google.cloud.spanner.SpannerExceptionFactory.newSpannerExceptionForCancellation;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkNotNull;
import static com.google.common.base.Preconditions.checkState;
import com.google.api.client.util.BackOff;
import com.google.api.client.util.ExponentialBackOff;
import com.google.api.gax.retrying.RetrySettings;
import com.google.cloud.ByteArray;
import com.google.cloud.Date;
import com.google.cloud.Timestamp;
import com.google.cloud.spanner.Type.StructField;
import com.google.cloud.spanner.spi.v1.SpannerRpc;
import com.google.cloud.spanner.v1.stub.SpannerStubSettings;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.AbstractIterator;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.google.common.util.concurrent.Uninterruptibles;
import com.google.protobuf.ByteString;
import com.google.protobuf.ListValue;
import com.google.protobuf.Value.KindCase;
import com.google.spanner.v1.PartialResultSet;
import com.google.spanner.v1.ResultSetMetadata;
import com.google.spanner.v1.ResultSetStats;
import com.google.spanner.v1.Transaction;
import com.google.spanner.v1.TypeCode;
import io.grpc.Context;
import io.opencensus.common.Scope;
import io.opencensus.trace.AttributeValue;
import io.opencensus.trace.Span;
import io.opencensus.trace.Tracer;
import io.opencensus.trace.Tracing;
import java.io.IOException;
import java.io.Serializable;
import java.math.BigDecimal;
import java.util.AbstractList;
import java.util.ArrayList;
import java.util.BitSet;
import java.util.Collections;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.Executor;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;
import java.util.logging.Level;
import java.util.logging.Logger;
import javax.annotation.Nullable;
/** Implementation of {@link ResultSet}. */
abstract class AbstractResultSet<R> extends AbstractStructReader implements ResultSet {
private static final Tracer tracer = Tracing.getTracer();
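  /**
   * Receives lifecycle callbacks from a streaming read/query so that the owning transaction can
   * observe transaction metadata, errors, and completion.
   */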
interface Listener {
/**
* Called when transaction metadata is seen. This method may be invoked at most once. If the
* method is invoked, it will precede {@link #onError(SpannerException)} or {@link #onDone()}.
*/
void onTransactionMetadata(Transaction transaction, boolean shouldIncludeId)
throws SpannerException;
/** Called when the read finishes with an error. Returns the error that should be thrown. */
SpannerException onError(SpannerException e, boolean withBeginTransaction);
/** Called when the read finishes normally. */
void onDone(boolean withBeginTransaction);
}
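  /**
   * {@link ResultSet} implementation backed by a stream of {@link PartialResultSet} messages.
   *
   * <p>Illustrative consumption pattern (a sketch of caller code, not part of this class; the
   * {@code executeQuery} call stands in for whatever produces the result set):
   *
   * <pre>{@code
   * try (ResultSet rs = executeQuery(...)) {
   *   while (rs.next()) {
   *     // read the current row via the typed getters
   *   }
   * }
   * }</pre>
   */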
@VisibleForTesting
static class GrpcResultSet extends AbstractResultSet<List<Object>> {
private final GrpcValueIterator iterator;
private final Listener listener;
private GrpcStruct currRow;
private SpannerException error;
private ResultSetStats statistics;
private boolean closed;
GrpcResultSet(CloseableIterator<PartialResultSet> iterator, Listener listener) {
this.iterator = new GrpcValueIterator(iterator);
this.listener = listener;
}
@Override
protected GrpcStruct currRow() {
checkState(!closed, "ResultSet is closed");
checkState(currRow != null, "next() call required");
return currRow;
}
@Override
public boolean next() throws SpannerException {
if (error != null) {
throw newSpannerException(error);
}
try {
if (currRow == null) {
ResultSetMetadata metadata = iterator.getMetadata();
if (metadata.hasTransaction()) {
listener.onTransactionMetadata(
metadata.getTransaction(), iterator.isWithBeginTransaction());
} else if (iterator.isWithBeginTransaction()) {
// The query should have returned a transaction.
throw SpannerExceptionFactory.newSpannerException(
ErrorCode.FAILED_PRECONDITION, AbstractReadContext.NO_TRANSACTION_RETURNED_MSG);
}
currRow = new GrpcStruct(iterator.type(), new ArrayList<>());
}
boolean hasNext = currRow.consumeRow(iterator);
if (!hasNext) {
statistics = iterator.getStats();
}
return hasNext;
} catch (Throwable t) {
throw yieldError(
SpannerExceptionFactory.asSpannerException(t),
iterator.isWithBeginTransaction() && currRow == null);
}
}
@Override
@Nullable
public ResultSetStats getStats() {
return statistics;
}
@Override
public void close() {
listener.onDone(iterator.isWithBeginTransaction());
iterator.close("ResultSet closed");
closed = true;
}
@Override
public Type getType() {
checkState(currRow != null, "next() call required");
return currRow.getType();
}
private SpannerException yieldError(SpannerException e, boolean beginTransaction) {
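      // Give the listener a chance to translate or record the error, then close this result set
      // and surface the (possibly replaced) exception to the caller.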
SpannerException toThrow = listener.onError(e, beginTransaction);
close();
throw toThrow;
}
}
/**
* Adapts a stream of {@code PartialResultSet} messages into a stream of {@code Value} messages.
*/
private static class GrpcValueIterator extends AbstractIterator<com.google.protobuf.Value> {
private enum StreamValue {
METADATA,
RESULT,
}
private final CloseableIterator<PartialResultSet> stream;
private ResultSetMetadata metadata;
private Type type;
private PartialResultSet current;
private int pos;
private ResultSetStats statistics;
GrpcValueIterator(CloseableIterator<PartialResultSet> stream) {
this.stream = stream;
}
@SuppressWarnings("unchecked")
@Override
protected com.google.protobuf.Value computeNext() {
if (!ensureReady(StreamValue.RESULT)) {
endOfData();
return null;
}
com.google.protobuf.Value value = current.getValues(pos++);
KindCase kind = value.getKindCase();
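      // Only strings and lists can be chunked across PartialResultSet messages; any other kind is
      // complete as-is.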
if (!isMergeable(kind)) {
if (pos == current.getValuesCount() && current.getChunkedValue()) {
throw newSpannerException(ErrorCode.INTERNAL, "Unexpected chunked PartialResultSet.");
} else {
return value;
}
}
if (!current.getChunkedValue() || pos != current.getValuesCount()) {
return value;
}
Object merged =
kind == KindCase.STRING_VALUE
? value.getStringValue()
: new ArrayList<>(value.getListValue().getValuesList());
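      // The value is chunked: keep consuming PartialResultSets and merging in their first value
      // until a message no longer marks the value as chunked.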
while (current.getChunkedValue() && pos == current.getValuesCount()) {
if (!ensureReady(StreamValue.RESULT)) {
throw newSpannerException(
ErrorCode.INTERNAL, "Stream closed in the middle of chunked value");
}
com.google.protobuf.Value newValue = current.getValues(pos++);
if (newValue.getKindCase() != kind) {
throw newSpannerException(
ErrorCode.INTERNAL,
"Unexpected type in middle of chunked value. Expected: "
+ kind
+ " but got: "
+ newValue.getKindCase());
}
if (kind == KindCase.STRING_VALUE) {
merged = merged + newValue.getStringValue();
} else {
concatLists(
(List<com.google.protobuf.Value>) merged, newValue.getListValue().getValuesList());
}
}
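      // Re-wrap the fully merged Java-level value into a protobuf Value.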
if (kind == KindCase.STRING_VALUE) {
return com.google.protobuf.Value.newBuilder().setStringValue((String) merged).build();
} else {
return com.google.protobuf.Value.newBuilder()
.setListValue(
ListValue.newBuilder().addAllValues((List<com.google.protobuf.Value>) merged))
.build();
}
}
ResultSetMetadata getMetadata() throws SpannerException {
if (metadata == null) {
if (!ensureReady(StreamValue.METADATA)) {
throw newSpannerException(ErrorCode.INTERNAL, "Stream closed without sending metadata");
}
}
return metadata;
}
    /**
     * Returns the query statistics. Statistics are delivered with the last {@code
     * PartialResultSet} in the stream, so this method returns {@code null} until the caller has
     * consumed all results.
     */
@Nullable
ResultSetStats getStats() {
return statistics;
}
Type type() {
checkState(type != null, "metadata has not been received");
return type;
}
private boolean ensureReady(StreamValue requiredValue) throws SpannerException {
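      // Pull messages from the underlying stream until at least one value is buffered (or, for
      // METADATA, until the first message has been seen). Returns false when the stream ends.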
while (current == null || pos >= current.getValuesCount()) {
if (!stream.hasNext()) {
return false;
}
current = stream.next();
pos = 0;
if (type == null) {
// This is the first message on the stream.
if (!current.hasMetadata() || !current.getMetadata().hasRowType()) {
throw newSpannerException(ErrorCode.INTERNAL, "Missing type metadata in first message");
}
metadata = current.getMetadata();
com.google.spanner.v1.Type typeProto =
com.google.spanner.v1.Type.newBuilder()
.setCode(TypeCode.STRUCT)
.setStructType(metadata.getRowType())
.build();
try {
type = Type.fromProto(typeProto);
} catch (IllegalArgumentException e) {
throw newSpannerException(
ErrorCode.INTERNAL, "Invalid type metadata: " + e.getMessage(), e);
}
}
if (current.hasStats()) {
statistics = current.getStats();
}
if (requiredValue == StreamValue.METADATA) {
return true;
}
}
return true;
}
void close(@Nullable String message) {
stream.close(message);
}
boolean isWithBeginTransaction() {
return stream.isWithBeginTransaction();
}
    /**
     * Concatenates {@code b} onto {@code a}, merging a value that was chunked across the boundary
     * of the two lists.
     *
     * @param a a mutable list that receives the elements of {@code b}
     * @param b the list to append; its first element may be merged into the last element of
     *     {@code a}
     */
private void concatLists(List<com.google.protobuf.Value> a, List<com.google.protobuf.Value> b) {
      if (a.isEmpty() || b.isEmpty()) {
        a.addAll(b);
        return;
      } else {
com.google.protobuf.Value last = a.get(a.size() - 1);
com.google.protobuf.Value first = b.get(0);
KindCase lastKind = last.getKindCase();
KindCase firstKind = first.getKindCase();
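        // The boundary elements are two halves of one chunked value only if both are of the same
        // mergeable kind (string or list); in that case merge them into a single element.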
if (isMergeable(lastKind) && lastKind == firstKind) {
com.google.protobuf.Value merged;
if (lastKind == KindCase.STRING_VALUE) {
String lastStr = last.getStringValue();
String firstStr = first.getStringValue();
merged =
com.google.protobuf.Value.newBuilder().setStringValue(lastStr + firstStr).build();
} else { // List
            List<com.google.protobuf.Value> mergedList =
                new ArrayList<>(last.getListValue().getValuesList());
concatLists(mergedList, first.getListValue().getValuesList());
merged =
com.google.protobuf.Value.newBuilder()
.setListValue(ListValue.newBuilder().addAllValues(mergedList))
.build();
}
a.set(a.size() - 1, merged);
a.addAll(b.subList(1, b.size()));
} else {
a.addAll(b);
}
}
}
private boolean isMergeable(KindCase kind) {
return kind == KindCase.STRING_VALUE || kind == KindCase.LIST_VALUE;
}
}
static class GrpcStruct extends Struct implements Serializable {
private final Type type;
private final List<Object> rowData;
    /**
     * Builds an immutable copy of this struct using {@link Struct#newBuilder()}. The copy acts as
     * this struct's serialization proxy, so a deserialized instance is a plain immutable {@link
     * Struct} rather than a {@code GrpcStruct}.
     */
private Object writeReplace() {
Builder builder = Struct.newBuilder();
List<Type.StructField> structFields = getType().getStructFields();
for (int i = 0; i < structFields.size(); i++) {
Type.StructField field = structFields.get(i);
String fieldName = field.getName();
Object value = rowData.get(i);
Type fieldType = field.getType();
switch (fieldType.getCode()) {
case BOOL:
builder.set(fieldName).to((Boolean) value);
break;
case INT64:
builder.set(fieldName).to((Long) value);
break;
case FLOAT64:
builder.set(fieldName).to((Double) value);
break;
case NUMERIC:
builder.set(fieldName).to((BigDecimal) value);
break;
case STRING:
builder.set(fieldName).to((String) value);
break;
case JSON:
builder.set(fieldName).to(Value.json((String) value));
break;
case BYTES:
builder.set(fieldName).to((ByteArray) value);
break;
case TIMESTAMP:
builder.set(fieldName).to((Timestamp) value);
break;
case DATE:
builder.set(fieldName).to((Date) value);
break;
case ARRAY:
switch (fieldType.getArrayElementType().getCode()) {
case BOOL:
builder.set(fieldName).toBoolArray((Iterable<Boolean>) value);
break;
case INT64:
builder.set(fieldName).toInt64Array((Iterable<Long>) value);
break;
case FLOAT64:
builder.set(fieldName).toFloat64Array((Iterable<Double>) value);
break;
case NUMERIC:
builder.set(fieldName).toNumericArray((Iterable<BigDecimal>) value);
break;
case STRING:
builder.set(fieldName).toStringArray((Iterable<String>) value);
break;
case JSON:
builder.set(fieldName).toJsonArray((Iterable<String>) value);
break;
case BYTES:
builder.set(fieldName).toBytesArray((Iterable<ByteArray>) value);
break;
case TIMESTAMP:
builder.set(fieldName).toTimestampArray((Iterable<Timestamp>) value);
break;
case DATE:
builder.set(fieldName).toDateArray((Iterable<Date>) value);
break;
case STRUCT:
builder
.set(fieldName)
.toStructArray(fieldType.getArrayElementType(), (Iterable<Struct>) value);
break;
default:
throw new AssertionError(
"Unhandled array type code: " + fieldType.getArrayElementType());
}
break;
case STRUCT:
if (value == null) {
builder.set(fieldName).to(fieldType, null);
} else {
builder.set(fieldName).to((Struct) value);
}
break;
default:
throw new AssertionError("Unhandled type code: " + fieldType.getCode());
}
}
return builder.build();
}
GrpcStruct(Type type, List<Object> rowData) {
this.type = type;
this.rowData = rowData;
}
@Override
public String toString() {
return this.rowData.toString();
}
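    /**
     * Reads the next row from the merged value stream into this struct, replacing any previously
     * stored row data.
     *
     * @return {@code true} if a row was consumed, {@code false} if the stream is exhausted
     */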
boolean consumeRow(Iterator<com.google.protobuf.Value> iterator) {
rowData.clear();
if (!iterator.hasNext()) {
return false;
}
for (Type.StructField fieldType : getType().getStructFields()) {
if (!iterator.hasNext()) {
throw newSpannerException(
ErrorCode.INTERNAL,
"Invalid value stream: end of stream reached before row is complete");
}
com.google.protobuf.Value value = iterator.next();
rowData.add(decodeValue(fieldType.getType(), value));
}
return true;
}
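    /** Decodes a single protobuf {@code Value} into the Java object implied by {@code fieldType}. */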
private static Object decodeValue(Type fieldType, com.google.protobuf.Value proto) {
if (proto.getKindCase() == KindCase.NULL_VALUE) {
return null;
}
switch (fieldType.getCode()) {
case BOOL:
checkType(fieldType, proto, KindCase.BOOL_VALUE);
return proto.getBoolValue();
case INT64:
checkType(fieldType, proto, KindCase.STRING_VALUE);
return Long.parseLong(proto.getStringValue());
case FLOAT64:
return valueProtoToFloat64(proto);
case NUMERIC:
return new BigDecimal(proto.getStringValue());
case STRING:
case JSON:
checkType(fieldType, proto, KindCase.STRING_VALUE);
return proto.getStringValue();
case BYTES:
checkType(fieldType, proto, KindCase.STRING_VALUE);
return ByteArray.fromBase64(proto.getStringValue());
case TIMESTAMP:
checkType(fieldType, proto, KindCase.STRING_VALUE);
return Timestamp.parseTimestamp(proto.getStringValue());
case DATE:
checkType(fieldType, proto, KindCase.STRING_VALUE);
return Date.parseDate(proto.getStringValue());
case ARRAY:
checkType(fieldType, proto, KindCase.LIST_VALUE);
ListValue listValue = proto.getListValue();
return decodeArrayValue(fieldType.getArrayElementType(), listValue);
case STRUCT:
checkType(fieldType, proto, KindCase.LIST_VALUE);
ListValue structValue = proto.getListValue();
return decodeStructValue(fieldType, structValue);
default:
throw new AssertionError("Unhandled type code: " + fieldType.getCode());
}
}
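    /** Decodes a protobuf {@code ListValue} holding struct fields into a {@link GrpcStruct}. */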
private static Struct decodeStructValue(Type structType, ListValue structValue) {
List<Type.StructField> fieldTypes = structType.getStructFields();
checkArgument(
structValue.getValuesCount() == fieldTypes.size(),
"Size mismatch between type descriptor and actual values.");
List<Object> fields = new ArrayList<>(fieldTypes.size());
List<com.google.protobuf.Value> fieldValues = structValue.getValuesList();
for (int i = 0; i < fieldTypes.size(); ++i) {
fields.add(decodeValue(fieldTypes.get(i).getType(), fieldValues.get(i)));
}
return new GrpcStruct(structType, fields);
}
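    /**
     * Decodes a protobuf {@code ListValue} into a Java list; the concrete list implementation
     * depends on {@code elementType} (primitive-backed containers for INT64/FLOAT64, views or
     * materialized lists otherwise).
     */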
static Object decodeArrayValue(Type elementType, ListValue listValue) {
switch (elementType.getCode()) {
case BOOL:
// Use a view: element conversion is virtually free.
return Lists.transform(
listValue.getValuesList(),
input -> input.getKindCase() == KindCase.NULL_VALUE ? null : input.getBoolValue());
case INT64:
          // For INT64/FLOAT64 types, use custom primitive-backed containers. These avoid creating
          // wrapper objects for non-null elements.
return new Int64Array(listValue);
case FLOAT64:
return new Float64Array(listValue);
case NUMERIC:
{
// Materialize list: element conversion is expensive and should happen only once.
ArrayList<Object> list = new ArrayList<>(listValue.getValuesCount());
for (com.google.protobuf.Value value : listValue.getValuesList()) {
list.add(
value.getKindCase() == KindCase.NULL_VALUE
? null
: new BigDecimal(value.getStringValue()));
}
return list;
}
case STRING:
case JSON:
return Lists.transform(
listValue.getValuesList(),
input -> input.getKindCase() == KindCase.NULL_VALUE ? null : input.getStringValue());
case BYTES:
{
// Materialize list: element conversion is expensive and should happen only once.
ArrayList<Object> list = new ArrayList<>(listValue.getValuesCount());
for (com.google.protobuf.Value value : listValue.getValuesList()) {
list.add(
value.getKindCase() == KindCase.NULL_VALUE
? null
: ByteArray.fromBase64(value.getStringValue()));
}
return list;
}
case TIMESTAMP:
{
// Materialize list: element conversion is expensive and should happen only once.
ArrayList<Object> list = new ArrayList<>(listValue.getValuesCount());
for (com.google.protobuf.Value value : listValue.getValuesList()) {
list.add(
value.getKindCase() == KindCase.NULL_VALUE
? null
: Timestamp.parseTimestamp(value.getStringValue()));
}
return list;
}
case DATE:
{
// Materialize list: element conversion is expensive and should happen only once.
ArrayList<Object> list = new ArrayList<>(listValue.getValuesCount());
for (com.google.protobuf.Value value : listValue.getValuesList()) {
list.add(
value.getKindCase() == KindCase.NULL_VALUE
? null
: Date.parseDate(value.getStringValue()));
}
return list;
}
case STRUCT:
{
ArrayList<Struct> list = new ArrayList<>(listValue.getValuesCount());
for (com.google.protobuf.Value value : listValue.getValuesList()) {
if (value.getKindCase() == KindCase.NULL_VALUE) {
list.add(null);
} else {
ListValue structValue = value.getListValue();
list.add(decodeStructValue(elementType, structValue));
}
}
return list;
}
default:
throw new AssertionError("Unhandled type code: " + elementType.getCode());
}
}
private static void checkType(
Type fieldType, com.google.protobuf.Value proto, KindCase expected) {
if (proto.getKindCase() != expected) {
throw newSpannerException(
ErrorCode.INTERNAL,
"Invalid value for column type "
+ fieldType
+ " expected "
+ expected
+ " but was "
+ proto.getKindCase());
}
}
Struct immutableCopy() {
return new GrpcStruct(type, new ArrayList<>(rowData));
}
@Override
public Type getType() {
return type;
}
@Override
public boolean isNull(int columnIndex) {
return rowData.get(columnIndex) == null;
}
@Override
protected boolean getBooleanInternal(int columnIndex) {
return (Boolean) rowData.get(columnIndex);
}
@Override
protected long getLongInternal(int columnIndex) {
return (Long) rowData.get(columnIndex);
}
@Override
protected double getDoubleInternal(int columnIndex) {
return (Double) rowData.get(columnIndex);
}
@Override
protected BigDecimal getBigDecimalInternal(int columnIndex) {
return (BigDecimal) rowData.get(columnIndex);
}
@Override
protected String getStringInternal(int columnIndex) {
return (String) rowData.get(columnIndex);
}
@Override
protected String getJsonInternal(int columnIndex) {
return (String) rowData.get(columnIndex);
}
@Override
protected ByteArray getBytesInternal(int columnIndex) {
return (ByteArray) rowData.get(columnIndex);
}
@Override
protected Timestamp getTimestampInternal(int columnIndex) {
return (Timestamp) rowData.get(columnIndex);
}
@Override
protected Date getDateInternal(int columnIndex) {
return (Date) rowData.get(columnIndex);
}
@Override
protected Value getValueInternal(int columnIndex) {
final List<Type.StructField> structFields = getType().getStructFields();
final StructField structField = structFields.get(columnIndex);
final Type columnType = structField.getType();
final boolean isNull = rowData.get(columnIndex) == null;
switch (columnType.getCode()) {
case BOOL:
return Value.bool(isNull ? null : getBooleanInternal(columnIndex));
case INT64:
return Value.int64(isNull ? null : getLongInternal(columnIndex));
case NUMERIC:
return Value.numeric(isNull ? null : getBigDecimalInternal(columnIndex));
case FLOAT64:
return Value.float64(isNull ? null : getDoubleInternal(columnIndex));
case STRING:
return Value.string(isNull ? null : getStringInternal(columnIndex));
case BYTES:
return Value.bytes(isNull ? null : getBytesInternal(columnIndex));
case TIMESTAMP:
return Value.timestamp(isNull ? null : getTimestampInternal(columnIndex));
case DATE:
return Value.date(isNull ? null : getDateInternal(columnIndex));
case STRUCT:
return Value.struct(isNull ? null : getStructInternal(columnIndex));
case ARRAY:
switch (columnType.getArrayElementType().getCode()) {
case BOOL:
return Value.boolArray(isNull ? null : getBooleanListInternal(columnIndex));
case INT64:
return Value.int64Array(isNull ? null : getLongListInternal(columnIndex));
case NUMERIC:
return Value.numericArray(isNull ? null : getBigDecimalListInternal(columnIndex));
case FLOAT64:
return Value.float64Array(isNull ? null : getDoubleListInternal(columnIndex));
case STRING:
return Value.stringArray(isNull ? null : getStringListInternal(columnIndex));
case BYTES:
return Value.bytesArray(isNull ? null : getBytesListInternal(columnIndex));
case TIMESTAMP:
return Value.timestampArray(isNull ? null : getTimestampListInternal(columnIndex));
case DATE:
return Value.dateArray(isNull ? null : getDateListInternal(columnIndex));
case STRUCT:
return Value.structArray(
columnType.getArrayElementType(),
isNull ? null : getStructListInternal(columnIndex));
          default:
            throw new IllegalArgumentException(
                "Invalid array value type " + columnType.getArrayElementType());
        }
      default:
        throw new IllegalArgumentException("Invalid value type " + columnType);
}
}
@Override
protected Struct getStructInternal(int columnIndex) {
return (Struct) rowData.get(columnIndex);
}
@Override
protected boolean[] getBooleanArrayInternal(int columnIndex) {
@SuppressWarnings("unchecked") // We know ARRAY<BOOL> produces a List<Boolean>.
List<Boolean> values = (List<Boolean>) rowData.get(columnIndex);
boolean[] r = new boolean[values.size()];
for (int i = 0; i < values.size(); ++i) {
if (values.get(i) == null) {
throw throwNotNull(columnIndex);
}
r[i] = values.get(i);
}
return r;
}
@Override
@SuppressWarnings("unchecked") // We know ARRAY<BOOL> produces a List<Boolean>.
protected List<Boolean> getBooleanListInternal(int columnIndex) {
return Collections.unmodifiableList((List<Boolean>) rowData.get(columnIndex));
}
@Override
protected long[] getLongArrayInternal(int columnIndex) {
return getLongListInternal(columnIndex).toPrimitiveArray(columnIndex);
}
@Override
protected Int64Array getLongListInternal(int columnIndex) {
return (Int64Array) rowData.get(columnIndex);
}
@Override
protected double[] getDoubleArrayInternal(int columnIndex) {
return getDoubleListInternal(columnIndex).toPrimitiveArray(columnIndex);
}
@Override
protected Float64Array getDoubleListInternal(int columnIndex) {
return (Float64Array) rowData.get(columnIndex);
}
@Override
@SuppressWarnings("unchecked") // We know ARRAY<NUMERIC> produces a List<BigDecimal>.
protected List<BigDecimal> getBigDecimalListInternal(int columnIndex) {
return (List<BigDecimal>) rowData.get(columnIndex);
}
@Override
@SuppressWarnings("unchecked") // We know ARRAY<STRING> produces a List<String>.
protected List<String> getStringListInternal(int columnIndex) {
return Collections.unmodifiableList((List<String>) rowData.get(columnIndex));
}
@Override
@SuppressWarnings("unchecked") // We know ARRAY<String> produces a List<String>.
protected List<String> getJsonListInternal(int columnIndex) {
return Collections.unmodifiableList((List<String>) rowData.get(columnIndex));
}
@Override
@SuppressWarnings("unchecked") // We know ARRAY<BYTES> produces a List<ByteArray>.
protected List<ByteArray> getBytesListInternal(int columnIndex) {
return Collections.unmodifiableList((List<ByteArray>) rowData.get(columnIndex));
}
@Override
@SuppressWarnings("unchecked") // We know ARRAY<TIMESTAMP> produces a List<Timestamp>.
protected List<Timestamp> getTimestampListInternal(int columnIndex) {
return Collections.unmodifiableList((List<Timestamp>) rowData.get(columnIndex));
}
@Override
@SuppressWarnings("unchecked") // We know ARRAY<DATE> produces a List<Date>.
protected List<Date> getDateListInternal(int columnIndex) {
return Collections.unmodifiableList((List<Date>) rowData.get(columnIndex));
}
@Override
@SuppressWarnings("unchecked") // We know ARRAY<STRUCT<...>> produces a List<STRUCT>.
protected List<Struct> getStructListInternal(int columnIndex) {
return Collections.unmodifiableList((List<Struct>) rowData.get(columnIndex));
}
}
@VisibleForTesting
interface CloseableIterator<T> extends Iterator<T> {
/**
* Closes the iterator, freeing any underlying resources.
*
* @param message a message to include in the final RPC status
*/
void close(@Nullable String message);
boolean isWithBeginTransaction();
}
/** Adapts a streaming read/query call into an iterator over partial result sets. */
@VisibleForTesting
static class GrpcStreamIterator extends AbstractIterator<PartialResultSet>
implements CloseableIterator<PartialResultSet> {
private static final Logger logger = Logger.getLogger(GrpcStreamIterator.class.getName());
private static final PartialResultSet END_OF_STREAM = PartialResultSet.newBuilder().build();
private final ConsumerImpl consumer = new ConsumerImpl();
private final BlockingQueue<PartialResultSet> stream;
private final Statement statement;
private SpannerRpc.StreamingCall call;
private volatile boolean withBeginTransaction;
private SpannerException error;
@VisibleForTesting
GrpcStreamIterator(int prefetchChunks) {
this(null, prefetchChunks);
}
@VisibleForTesting
GrpcStreamIterator(Statement statement, int prefetchChunks) {
this.statement = statement;
// One extra to allow for END_OF_STREAM message.
this.stream = new LinkedBlockingQueue<>(prefetchChunks + 1);
}
protected final SpannerRpc.ResultStreamConsumer consumer() {
return consumer;
}
public void setCall(SpannerRpc.StreamingCall call, boolean withBeginTransaction) {
this.call = call;
this.withBeginTransaction = withBeginTransaction;
}
@Override
public void close(@Nullable String message) {
if (call != null) {
call.cancel(message);
}
}
@Override
public boolean isWithBeginTransaction() {
return withBeginTransaction;
}
@Override
protected final PartialResultSet computeNext() {
PartialResultSet next;
try {
// TODO: Ideally honor io.grpc.Context while blocking here. In practice,
// cancellation/deadline results in an error being delivered to "stream", which
// should mean that we do not block significantly longer afterwards, but it would
// be more robust to use poll() with a timeout.
next = stream.take();
} catch (InterruptedException e) {
// Treat interrupt as a request to cancel the read.
throw SpannerExceptionFactory.propagateInterrupt(e);
}
if (next != END_OF_STREAM) {
call.request(1);
return next;
}
// All done - close() no longer needs to cancel the call.
call = null;
if (error != null) {
throw SpannerExceptionFactory.newSpannerException(error);
}
endOfData();
return null;
}
private void addToStream(PartialResultSet results) {
// We assume that nothing from the user will interrupt gRPC event threads.
Uninterruptibles.putUninterruptibly(stream, results);
}
private class ConsumerImpl implements SpannerRpc.ResultStreamConsumer {
@Override
public void onPartialResultSet(PartialResultSet results) {
addToStream(results);
}
@Override
public void onCompleted() {
addToStream(END_OF_STREAM);
}
@Override
public void onError(SpannerException e) {
if (statement != null) {
if (logger.isLoggable(Level.FINEST)) {
          // Include parameter values in the error message if the log level is set to FINEST.
e =
SpannerExceptionFactory.newSpannerExceptionPreformatted(
e.getErrorCode(),
String.format("%s - Statement: '%s'", e.getMessage(), statement.toString()),
e);
logger.log(Level.FINEST, "Error executing statement", e);
} else {
e =
SpannerExceptionFactory.newSpannerExceptionPreformatted(
e.getErrorCode(),
String.format("%s - Statement: '%s'", e.getMessage(), statement.getSql()),
e);
}
}
error = e;
addToStream(END_OF_STREAM);
}
}
}
/**
* Wraps an iterator over partial result sets, supporting resuming RPCs on error. This class keeps
* track of the most recent resume token seen, and will buffer partial result set chunks that do
* not have a resume token until one is seen or buffer space is exceeded, which reduces the chance
* of yielding data to the caller that cannot be resumed.
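   * <p>
   * For example (illustrative): with {@code maxBufferSize = 3}, chunks without a resume token
   * are buffered; once a chunk carrying a token arrives, the buffer is flushed and a later
   * stream failure can be resumed from that token. If a fourth tokenless chunk would overflow
   * the buffer, the iterator starts yielding non-resumable data and remains unsafe to retry
   * until the next resume token is seen.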
*/
@VisibleForTesting
abstract static class ResumableStreamIterator extends AbstractIterator<PartialResultSet>
implements CloseableIterator<PartialResultSet> {
private static final RetrySettings STREAMING_RETRY_SETTINGS =
SpannerStubSettings.newBuilder().executeStreamingSqlSettings().getRetrySettings();
private static final Logger logger = Logger.getLogger(ResumableStreamIterator.class.getName());
private final BackOff backOff = newBackOff();
private final LinkedList<PartialResultSet> buffer = new LinkedList<>();
private final int maxBufferSize;
private final Span span;
private CloseableIterator<PartialResultSet> stream;
private ByteString resumeToken;
private boolean finished;
/**
     * Indicates whether it is currently safe to retry RPCs. This will be {@code false} if we have
     * reached the maximum buffer size without seeing a resume token; in this case, we will drain
     * the buffer and remain in this state until we see a new resume token.
*/
private boolean safeToRetry = true;
protected ResumableStreamIterator(int maxBufferSize, String streamName, Span parent) {
checkArgument(maxBufferSize >= 0);
this.maxBufferSize = maxBufferSize;
this.span = tracer.spanBuilderWithExplicitParent(streamName, parent).startSpan();
}
private static ExponentialBackOff newBackOff() {
return new ExponentialBackOff.Builder()
.setMultiplier(STREAMING_RETRY_SETTINGS.getRetryDelayMultiplier())
.setInitialIntervalMillis(
Math.max(10, (int) STREAMING_RETRY_SETTINGS.getInitialRetryDelay().toMillis()))
.setMaxIntervalMillis(
Math.max(1000, (int) STREAMING_RETRY_SETTINGS.getMaxRetryDelay().toMillis()))
.setMaxElapsedTimeMillis(Integer.MAX_VALUE) // Prevent Backoff.STOP from getting returned.
.build();
}
private static void backoffSleep(Context context, BackOff backoff) throws SpannerException {
backoffSleep(context, nextBackOffMillis(backoff));
}
private static long nextBackOffMillis(BackOff backoff) throws SpannerException {
try {
return backoff.nextBackOffMillis();
} catch (IOException e) {
throw newSpannerException(ErrorCode.INTERNAL, e.getMessage(), e);
}
}
private static void backoffSleep(Context context, long backoffMillis) throws SpannerException {
tracer
.getCurrentSpan()
.addAnnotation(
"Backing off",
ImmutableMap.of("Delay", AttributeValue.longAttributeValue(backoffMillis)));
final CountDownLatch latch = new CountDownLatch(1);
final Context.CancellationListener listener =
ignored -> {
// Wakeup on cancellation / DEADLINE_EXCEEDED.
latch.countDown();
};
context.addListener(listener, DirectExecutor.INSTANCE);
try {
if (backoffMillis == BackOff.STOP) {
// Highly unlikely but we handle it just in case.
backoffMillis = STREAMING_RETRY_SETTINGS.getMaxRetryDelay().toMillis();
}
if (latch.await(backoffMillis, TimeUnit.MILLISECONDS)) {
// Woken by context cancellation.
throw newSpannerExceptionForCancellation(context, null);
}
} catch (InterruptedException interruptExcept) {
throw newSpannerExceptionForCancellation(context, interruptExcept);
} finally {
context.removeListener(listener);
}
}
private enum DirectExecutor implements Executor {
INSTANCE;
@Override
public void execute(Runnable command) {
command.run();
}
}
abstract CloseableIterator<PartialResultSet> startStream(@Nullable ByteString resumeToken);
@Override
public void close(@Nullable String message) {
if (stream != null) {
stream.close(message);
span.end(TraceUtil.END_SPAN_OPTIONS);
stream = null;
}
}
@Override
public boolean isWithBeginTransaction() {
return stream != null && stream.isWithBeginTransaction();
}
@Override
protected PartialResultSet computeNext() {
Context context = Context.current();
while (true) {
// Eagerly start stream before consuming any buffered items.
if (stream == null) {
span.addAnnotation(
"Starting/Resuming stream",
ImmutableMap.of(
"ResumeToken",
AttributeValue.stringAttributeValue(
resumeToken == null ? "null" : resumeToken.toStringUtf8())));
try (Scope s = tracer.withSpan(span)) {
          // When starting a new stream, set the Span as current so that the gRPC Span becomes a
          // child of this Span.
stream = checkNotNull(startStream(resumeToken));
}
}
// Buffer contains items up to a resume token or has reached capacity: flush.
if (!buffer.isEmpty()
&& (finished || !safeToRetry || !buffer.getLast().getResumeToken().isEmpty())) {
return buffer.pop();
}
try {
if (stream.hasNext()) {
PartialResultSet next = stream.next();
boolean hasResumeToken = !next.getResumeToken().isEmpty();
if (hasResumeToken) {
resumeToken = next.getResumeToken();
safeToRetry = true;
}
// If the buffer is empty and this chunk has a resume token or we cannot resume safely
// anyway, we can yield it immediately rather than placing it in the buffer to be
// returned on the next iteration.
if ((hasResumeToken || !safeToRetry) && buffer.isEmpty()) {
return next;
}
buffer.add(next);
if (buffer.size() > maxBufferSize && buffer.getLast().getResumeToken().isEmpty()) {
              // We need to flush without a resume token. Errors encountered until we see
              // such a token will fail the read.
safeToRetry = false;
}
} else {
finished = true;
if (buffer.isEmpty()) {
endOfData();
return null;
}
}
} catch (SpannerException e) {
if (safeToRetry && e.isRetryable()) {
span.addAnnotation(
"Stream broken. Safe to retry", TraceUtil.getExceptionAnnotations(e));
logger.log(Level.FINE, "Retryable exception, will sleep and retry", e);
          // Truncate any items in the buffer received after the last resume token.
while (!buffer.isEmpty() && buffer.getLast().getResumeToken().isEmpty()) {
buffer.removeLast();
}
assert buffer.isEmpty() || buffer.getLast().getResumeToken().equals(resumeToken);
stream = null;
try (Scope s = tracer.withSpan(span)) {
long delay = e.getRetryDelayInMillis();
if (delay != -1) {
backoffSleep(context, delay);
} else {
backoffSleep(context, backOff);
}
}
continue;
}
span.addAnnotation("Stream broken. Not safe to retry");
TraceUtil.setWithFailure(span, e);
throw e;
} catch (RuntimeException e) {
span.addAnnotation("Stream broken. Not safe to retry");
TraceUtil.setWithFailure(span, e);
throw e;
}
}
}
}
static double valueProtoToFloat64(com.google.protobuf.Value proto) {
if (proto.getKindCase() == KindCase.STRING_VALUE) {
switch (proto.getStringValue()) {
case "-Infinity":
return Double.NEGATIVE_INFINITY;
case "Infinity":
return Double.POSITIVE_INFINITY;
case "NaN":
return Double.NaN;
default:
// Fall-through to handling below to produce an error.
}
}
if (proto.getKindCase() != KindCase.NUMBER_VALUE) {
throw newSpannerException(
ErrorCode.INTERNAL,
"Invalid value for column type "
+ Type.float64()
+ " expected NUMBER_VALUE or STRING_VALUE with value one of"
+ " \"Infinity\", \"-Infinity\", or \"NaN\" but was "
+ proto.getKindCase()
+ (proto.getKindCase() == KindCase.STRING_VALUE
? " with value \"" + proto.getStringValue() + "\""
: ""));
}
return proto.getNumberValue();
}
static NullPointerException throwNotNull(int columnIndex) {
throw new NullPointerException(
"Cannot call array getter for column " + columnIndex + " with null elements");
}
/**
* Memory-optimized base class for {@code ARRAY<INT64>} and {@code ARRAY<FLOAT64>} types. Both of
* these involve conversions from the type yielded by JSON parsing, which are {@code String} and
* {@code BigDecimal} respectively. Rather than construct new wrapper objects for each array
* element, we use primitive arrays and a {@code BitSet} to track nulls.
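   * <p>
   * For example (illustrative): the proto list {@code ["1", null, "3"]} for an
   * {@code ARRAY<INT64>} column is stored as {@code long[] {1L, 0L, 3L}} plus a {@code BitSet}
   * with bit 1 set, so {@code get(1)} returns {@code null} without boxing the non-null elements.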
*/
abstract static class PrimitiveArray<T, A> extends AbstractList<T> {
private final A data;
private final BitSet nulls;
private final int size;
PrimitiveArray(ListValue protoList) {
this.size = protoList.getValuesCount();
A data = newArray(size);
BitSet nulls = new BitSet(size);
for (int i = 0; i < protoList.getValuesCount(); ++i) {
if (protoList.getValues(i).getKindCase() == KindCase.NULL_VALUE) {
nulls.set(i);
} else {
setProto(data, i, protoList.getValues(i));
}
}
this.data = data;
this.nulls = nulls;
}
PrimitiveArray(A data, BitSet nulls, int size) {
this.data = data;
this.nulls = nulls;
this.size = size;
}
abstract A newArray(int size);
abstract void setProto(A array, int i, com.google.protobuf.Value protoValue);
abstract T get(A array, int i);
@Override
public T get(int index) {
if (index < 0 || index >= size) {
throw new ArrayIndexOutOfBoundsException("index=" + index + " size=" + size);
}
return nulls.get(index) ? null : get(data, index);
}
@Override
public int size() {
return size;
}
A toPrimitiveArray(int columnIndex) {
if (nulls.length() > 0) {
throw throwNotNull(columnIndex);
}
A r = newArray(size);
System.arraycopy(data, 0, r, 0, size);
return r;
}
}
static class Int64Array extends PrimitiveArray<Long, long[]> {
Int64Array(ListValue protoList) {
super(protoList);
}
Int64Array(long[] data, BitSet nulls) {
super(data, nulls, data.length);
}
@Override
long[] newArray(int size) {
return new long[size];
}
@Override
void setProto(long[] array, int i, com.google.protobuf.Value protoValue) {
array[i] = Long.parseLong(protoValue.getStringValue());
}
@Override
Long get(long[] array, int i) {
return array[i];
}
}
static class Float64Array extends PrimitiveArray<Double, double[]> {
Float64Array(ListValue protoList) {
super(protoList);
}
Float64Array(double[] data, BitSet nulls) {
super(data, nulls, data.length);
}
@Override
double[] newArray(int size) {
return new double[size];
}
@Override
void setProto(double[] array, int i, com.google.protobuf.Value protoValue) {
array[i] = valueProtoToFloat64(protoValue);
}
@Override
Double get(double[] array, int i) {
return array[i];
}
}
protected abstract GrpcStruct currRow();
@Override
public Struct getCurrentRowAsStruct() {
return currRow().immutableCopy();
}
@Override
protected boolean getBooleanInternal(int columnIndex) {
return currRow().getBooleanInternal(columnIndex);
}
@Override
protected long getLongInternal(int columnIndex) {
return currRow().getLongInternal(columnIndex);
}
@Override
protected double getDoubleInternal(int columnIndex) {
return currRow().getDoubleInternal(columnIndex);
}
@Override
protected BigDecimal getBigDecimalInternal(int columnIndex) {
return currRow().getBigDecimalInternal(columnIndex);
}
@Override
protected String getStringInternal(int columnIndex) {
return currRow().getStringInternal(columnIndex);
}
@Override
protected String getJsonInternal(int columnIndex) {
return currRow().getJsonInternal(columnIndex);
}
@Override
protected ByteArray getBytesInternal(int columnIndex) {
return currRow().getBytesInternal(columnIndex);
}
@Override
protected Timestamp getTimestampInternal(int columnIndex) {
return currRow().getTimestampInternal(columnIndex);
}
@Override
protected Date getDateInternal(int columnIndex) {
return currRow().getDateInternal(columnIndex);
}
@Override
protected Value getValueInternal(int columnIndex) {
return currRow().getValueInternal(columnIndex);
}
@Override
protected boolean[] getBooleanArrayInternal(int columnIndex) {
return currRow().getBooleanArrayInternal(columnIndex);
}
@Override
protected List<Boolean> getBooleanListInternal(int columnIndex) {
return currRow().getBooleanListInternal(columnIndex);
}
@Override
protected long[] getLongArrayInternal(int columnIndex) {
return currRow().getLongArrayInternal(columnIndex);
}
@Override
protected List<Long> getLongListInternal(int columnIndex) {
return currRow().getLongListInternal(columnIndex);
}
@Override
protected double[] getDoubleArrayInternal(int columnIndex) {
return currRow().getDoubleArrayInternal(columnIndex);
}
@Override
protected List<Double> getDoubleListInternal(int columnIndex) {
return currRow().getDoubleListInternal(columnIndex);
}
@Override
protected List<BigDecimal> getBigDecimalListInternal(int columnIndex) {
return currRow().getBigDecimalListInternal(columnIndex);
}
@Override
protected List<String> getStringListInternal(int columnIndex) {
return currRow().getStringListInternal(columnIndex);
}
@Override
protected List<String> getJsonListInternal(int columnIndex) {
return currRow().getJsonListInternal(columnIndex);
}
@Override
protected List<ByteArray> getBytesListInternal(int columnIndex) {
return currRow().getBytesListInternal(columnIndex);
}
@Override
protected List<Timestamp> getTimestampListInternal(int columnIndex) {
return currRow().getTimestampListInternal(columnIndex);
}
@Override
protected List<Date> getDateListInternal(int columnIndex) {
return currRow().getDateListInternal(columnIndex);
}
@Override
protected List<Struct> getStructListInternal(int columnIndex) {
return currRow().getStructListInternal(columnIndex);
}
@Override
public boolean isNull(int columnIndex) {
return currRow().isNull(columnIndex);
}
}
| looker-open-source/java-spanner | google-cloud-spanner/src/main/java/com/google/cloud/spanner/AbstractResultSet.java | Java | apache-2.0 | 50,481 |
/*
* Copyright 2010 The Closure Compiler Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.javascript.jscomp;
import com.google.common.collect.ImmutableSet;
import com.google.javascript.jscomp.NodeTraversal.AbstractPostOrderCallback;
import com.google.javascript.jscomp.SpecializeModule.SpecializationState;
import com.google.javascript.rhino.Node;
/**
* Tests for {@link SpecializeModule}.
*
* @author [email protected] (Devin Coughlin)
*/
public class SpecializeModuleTest extends CompilerTestCase {
private static final String SHARED_EXTERNS = "var alert = function() {}";
public SpecializeModuleTest() {
super(SHARED_EXTERNS);
}
private PassFactory inlineFunctions =
new PassFactory("inlineFunctions", true) {
@Override
protected CompilerPass createInternal(AbstractCompiler compiler) {
return new InlineFunctions(compiler,
compiler.getUniqueNameIdSupplier(), true, false, true);
}
};
private PassFactory removeUnusedPrototypeProperties =
new PassFactory("removeUnusedPrototypeProperties", true) {
@Override
protected CompilerPass createInternal(AbstractCompiler compiler) {
return new RemoveUnusedPrototypeProperties(compiler, false, false);
}
};
private PassFactory devirtualizePrototypeMethods =
new PassFactory("devirtualizePrototypeMethods", true) {
@Override
protected CompilerPass createInternal(AbstractCompiler compiler) {
return new DevirtualizePrototypeMethods(compiler);
}
};
@Override
protected CompilerPass getProcessor(final Compiler compiler) {
final SpecializeModule specializeModule = new SpecializeModule(compiler,
devirtualizePrototypeMethods, inlineFunctions,
removeUnusedPrototypeProperties);
return new CompilerPass() {
public void process(Node externs, Node root) {
specializeModule.process(externs, root);
/* Make sure variables are declared before used */
new VarCheck(compiler).process(externs, root);
}
};
}
@Override
public void setUp() throws Exception {
super.setUp();
enableNormalize();
}
public void testSpecializeInline() {
JSModule[] modules = createModuleStar(
// m1
/* Recursion in A() prevents inline of A*/
"var A = function() {alert(B());A()};" +
"var B = function() {return 6};" +
"A();",
// m2
"A();" +
"B();" +
"B = function() {return 7};" +
"A();" +
"B();"
);
test(modules, new String[] {
// m1
"var A = function() {alert(6);A()};" + /* Specialized A */
"A();" +
"var B;",
// m2
"A = function() {alert(B());A()};" + /* Unspecialized A */
"B = function() {return 6};" + /* Removed from m1, so add to m2 */
"A();" +
"B();" +
"B = function() {return 7};" +
"A();" +
"B();"
});
}
public void testSpecializeCascadedInline() {
JSModule[] modules = createModuleStar(
// m1
/* Recursion in A() prevents inline of A*/
"var A = function() {alert(B());A()};" +
"var B = function() {return C()};" +
"var C = function() {return 6};" +
"A();",
// m2
"B = function() {return 7};" +
"A();");
test(modules, new String[] {
// m1
"var A = function() {alert(6);A()};" + /* Specialized A */
"A();" +
"var B, C;",
// m2
"A = function() {alert(B());A()};" + /* Unspecialized A */
"B = function() {return C()};" + /* Removed from m1, so add to m2 */
"C = function() {return 6};" + /* Removed from m1, so add to m2 */
"B = function() {return 7};" +
"A();"
});
}
public void testSpecializeInlineWithMultipleDependents() {
JSModule[] modules = createModuleStar(
// m1
/* Recursion in A() prevents inline of A*/
"var A = function() {alert(B());A()};" +
"var B = function() {return 6};" +
"A();",
// m2
"B = function() {return 7};" +
"A();",
// m3
"A();"
);
test(modules, new String[] {
// m1
"var A = function() {alert(6);A()};" + /* Specialized A */
"A();" +
"var B;",
// m2
"A = function() {alert(B());A()};" + /* Unspecialized A */
"B = function() {return 6};" + /* Removed from m1, so add to m2 */
"B = function() {return 7};" +
"A();",
"A = function() {alert(B());A()};" + /* Unspecialized A */
"B = function() {return 6};" + /* Removed from m1, so add to m2 */
"A();",
});
}
public void testSpecializeInlineWithNamespaces() {
JSModule[] modules = createModuleStar(
// m1
"var ns = {};" +
/* Recursion in A() prevents inline of A*/
"ns.A = function() {alert(B());ns.A()};" +
"var B = function() {return 6};" +
"ns.A();",
// m2
"B = function() {return 7};" +
"ns.A();");
test(modules, new String[] {
// m1
"var ns = {};" +
"ns.A = function() {alert(6);ns.A()};" + /* Specialized A */
"ns.A();" +
"var B;",
// m2
"ns.A = function() {alert(B());ns.A()};" + /* Unspecialized A */
"B = function() {return 6};" + /* Removed from m1, so add to m2 */
"B = function() {return 7};" +
"ns.A();"
});
}
public void testSpecializeInlineWithRegularFunctions() {
JSModule[] modules = createModuleStar(
// m1
/* Recursion in A() prevents inline of A*/
"function A() {alert(B());A()}" +
"function B() {return 6}" +
"A();",
// m2
"B = function() {return 7};" +
"A();");
test(modules, new String[] {
// m1
"function A() {alert(6);A()}" + /* Specialized A */
"A();" +
"var B;",
// m2
"A = function() {alert(B());A()};" + /* Unspecialized A */
"B = function() {return 6};" + /* Removed from m1, so add to m2 */
/* Start of original m2 */
"B = function() {return 7};" +
"A();"
});
}
public void testDontSpecializeLocalNonAnonymousFunctions() {
/* normalize result, but not expected */
enableNormalize(false);
JSModule[] modules = createModuleStar(
// m1
"(function(){var noSpecialize = " +
"function() {alert(6)};noSpecialize()})()",
// m2
"");
test(modules, new String[] {
// m1
"(function(){var noSpecialize = " +
"function() {alert(6)};noSpecialize()})()",
// m2
""
});
}
public void testAddDummyVarsForRemovedFunctions() {
JSModule[] modules = createModuleStar(
// m1
/* Recursion in A() prevents inline of A*/
"var A = function() {alert(B() + C());A()};" +
"var B = function() {return 6};" +
"var C = function() {return 8};" +
"A();",
// m2
"" +
"A();");
test(modules, new String[] {
// m1
"var A = function() {alert(6 + 8);A()};" + /* Specialized A */
"A();" +
"var B, C;",
// m2
"A = function() {alert(B() + C());A()};" + /* Unspecialized A */
"B = function() {return 6};" + /* Removed from m1, so add to m2 */
"C = function() {return 8};" + /* Removed from m1, so add to m2 */
"A();"
});
}
public void testSpecializeRemoveUnusedProperties() {
JSModule[] modules = createModuleStar(
// m1
/* Recursion in A() prevents inline of A*/
"var Foo = function(){};" + /* constructor */
"Foo.prototype.a = function() {this.a()};" +
"Foo.prototype.b = function() {return 6};" +
"Foo.prototype.c = function() {return 7};" +
"var aliasA = Foo.prototype.a;" + // Prevents devirtualization of a
"var x = new Foo();" +
"x.a();",
// m2
"");
test(modules, new String[] {
// m1
"var Foo = function(){};" + /* constructor */
"Foo.prototype.a = function() {this.a()};" +
"var aliasA = Foo.prototype.a;" +
"var x = new Foo();" +
"x.a();",
// m2
"Foo.prototype.b = function() {return 6};" +
"Foo.prototype.c = function() {return 7};"
});
}
public void testDontSpecializeAliasedFunctions_inline() {
JSModule[] modules = createModuleStar(
// m1
/* Recursion in A() prevents inline of A*/
"function A() {alert(B());A()}" +
"function B() {return 6}" +
"var aliasA = A;" +
"A();",
// m2
"B = function() {return 7};" +
"B();");
test(modules, new String[] {
// m1
/* Recursion in A() prevents inline of A*/
"function A() {alert(B());A()}" +
"function B() {return 6}" +
"var aliasA = A;" +
"A();",
// m2
"B = function() {return 7};" +
"B();"
});
}
public void testDontSpecializeAliasedFunctions_remove_unused_properties() {
JSModule[] modules = createModuleStar(
// m1
"var Foo = function(){};" + /* constructor */
"Foo.prototype.a = function() {this.a()};" +
"Foo.prototype.b = function() {return 6};" +
"var aliasB = Foo.prototype.b;" +
"Foo.prototype.c = function() {return 7};" +
"Foo.prototype.d = function() {return 7};" +
"var aliasA = Foo.prototype.a;" + // Prevents devirtualization of a
"var x = new Foo();" +
"x.a();" +
"var aliasC = (new Foo).c",
// m2
"");
test(modules, new String[] {
// m1
"var Foo = function(){};" + /* constructor */
"Foo.prototype.a = function() {this.a()};" +
"Foo.prototype.b = function() {return 6};" +
"var aliasB = Foo.prototype.b;" +
"Foo.prototype.c = function() {return 7};" +
"var aliasA = Foo.prototype.a;" + // Prevents devirtualization of a
"var x = new Foo();" +
"x.a();" +
"var aliasC = (new Foo).c",
// m2
"Foo.prototype.d = function() {return 7};"
});
}
public void testSpecializeDevirtualizePrototypeMethods() {
JSModule[] modules = createModuleStar(
// m1
"/** @constructor */" +
"var Foo = function(){};" + /* constructor */
"Foo.prototype.a = function() {this.a();return 7};" +
"Foo.prototype.b = function() {this.a()};" +
"var x = new Foo();" +
"x.a();",
// m2
"");
test(modules, new String[] {
// m1
"var Foo = function(){};" + /* constructor */
"var JSCompiler_StaticMethods_a =" +
"function(JSCompiler_StaticMethods_a$self) {" +
"JSCompiler_StaticMethods_a(JSCompiler_StaticMethods_a$self);" +
"return 7" +
"};" +
"var x = new Foo();" +
"JSCompiler_StaticMethods_a(x);",
// m2
"Foo.prototype.a = function() {this.a();return 7};" +
"Foo.prototype.b = function() {this.a()};"
});
}
public void testSpecializeDevirtualizePrototypeMethodsWithInline() {
JSModule[] modules = createModuleStar(
// m1
"/** @constructor */" +
"var Foo = function(){};" + /* constructor */
"Foo.prototype.a = function() {return 7};" +
"var x = new Foo();" +
"var z = x.a();",
// m2
"");
test(modules, new String[] {
// m1
"var Foo = function(){};" + /* constructor */
"var x = new Foo();" +
"var z = 7;",
// m2
"Foo.prototype.a = function() {return 7};"
});
}
/**
* Tests for {@link SpecializeModule.SpecializationState}.
*/
public static class SpecializeModuleSpecializationStateTest
extends CompilerTestCase {
Compiler lastCompiler;
SpecializationState lastState;
@Override
public CompilerPass getProcessor(final Compiler compiler) {
lastCompiler = compiler;
return new CompilerPass() {
public void process(Node externs, Node root) {
SimpleDefinitionFinder defFinder =
new SimpleDefinitionFinder(compiler);
defFinder.process(externs, root);
SimpleFunctionAliasAnalysis functionAliasAnalysis =
new SimpleFunctionAliasAnalysis();
functionAliasAnalysis.analyze(defFinder);
lastState = new SpecializationState(functionAliasAnalysis);
}
};
}
public void testRemovedFunctions() {
testSame("function F(){}\nvar G = function(a){};");
assertEquals(ImmutableSet.of(), lastState.getRemovedFunctions());
Node functionF = findFunction("F");
lastState.reportRemovedFunction(functionF, functionF.getParent());
assertEquals(ImmutableSet.of(functionF), lastState.getRemovedFunctions());
      Node functionG = findFunction("G");
      lastState.reportRemovedFunction(functionG, functionG.getParent());
assertEquals(ImmutableSet.of(functionF, functionG),
lastState.getRemovedFunctions());
assertEquals(ImmutableSet.of(), lastState.getSpecializedFunctions());
}
public void testSpecializedFunctions() {
testSame("function F(){}\nvar G = function(a){};");
assertEquals(ImmutableSet.of(), lastState.getSpecializedFunctions());
Node functionF = findFunction("F");
lastState.reportSpecializedFunction(functionF);
assertEquals(ImmutableSet.of(functionF),
lastState.getSpecializedFunctions());
      Node functionG = findFunction("G");
lastState.reportSpecializedFunction(functionG);
assertEquals(ImmutableSet.of(functionF, functionG),
lastState.getSpecializedFunctions());
assertEquals(ImmutableSet.of(), lastState.getRemovedFunctions());
}
public void testCanFixupFunction() {
testSame("function F(){}\n" +
"var G = function(a){};\n" +
"var ns = {};" +
"ns.H = function(){};" +
"var ns2 = {I : function anon1(){}};" +
"(function anon2(){})();");
assertTrue(lastState.canFixupFunction(findFunction("F")));
assertTrue(lastState.canFixupFunction(findFunction("G")));
assertTrue(lastState.canFixupFunction(findFunction("ns.H")));
assertFalse(lastState.canFixupFunction(findFunction("anon1")));
assertFalse(lastState.canFixupFunction(findFunction("anon2")));
// Can't guarantee safe fixup for aliased functions
testSame("function A(){}\n" +
"var aliasA = A;\n");
assertFalse(lastState.canFixupFunction(findFunction("A")));
}
private Node findFunction(String name) {
FunctionFinder f = new FunctionFinder(name);
new NodeTraversal(lastCompiler, f).traverse(lastCompiler.jsRoot);
assertNotNull("Couldn't find " + name, f.found);
return f.found;
}
/**
* Quick Traversal to find a given function in the AST.
*/
private class FunctionFinder extends AbstractPostOrderCallback {
Node found = null;
final String target;
FunctionFinder(String target) {
this.target = target;
}
public void visit(NodeTraversal t, Node n, Node parent) {
if (NodeUtil.isFunction(n)
&& target.equals(NodeUtil.getFunctionName(n))) {
found = n;
}
}
}
}
}
| JonathanWalsh/Granule-Closure-Compiler | test/com/google/javascript/jscomp/SpecializeModuleTest.java | Java | apache-2.0 | 16,009 |
/*
* Copyright 2015 Paul Horn
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ff7.characters
import scalaz.NonEmptyList
sealed trait CharacterAction
object CharacterAction {
val attack: CharacterAction = Attack
val magic: CharacterAction = Magic
// val item: CharacterAction = Item
// val defend: CharacterAction = Defend
val skip: CharacterAction = Skip
// val actions = NonEmptyList(attack, magic, item, defend, skip)
val actions = NonEmptyList(attack, magic, skip)
case object Attack extends CharacterAction
case object Magic extends CharacterAction
// case object Item extends CharacterAction
// case object Defend extends CharacterAction
case object Skip extends CharacterAction
}
| knutwalker/ff7-simulator | api/src/main/scala/ff7/characters/CharacterAction.scala | Scala | apache-2.0 | 1,235 |
'use strict';
const { extend } = require('underscore');
const dbclient = require('abacus-dbclient');
const { testCollectedUsageID, testResourceID, testOrganizationID, testSpaceID, testConsumerID, testPlanID,
testResourceType, testAccountID, testMeteringPlanID, testRatingPlanID,
testPricingPlanID } = require('./fixtures/usageDocumentFieldsConstants');
const _commonBlueprint = {
collected_usage_id: testCollectedUsageID,
resource_id: testResourceID,
organization_id: testOrganizationID,
space_id: testSpaceID,
consumer_id: testConsumerID,
plan_id: testPlanID,
resource_type: testResourceType,
account_id: testAccountID,
metering_plan_id: testMeteringPlanID,
rating_plan_id: testRatingPlanID,
pricing_plan_id: testPricingPlanID
};
const buildUsage = (...builders) => {
const usage = {};
for(let builder of builders)
builder(usage);
return extend(usage, {
id: dbclient.kturi(usage.resource_instance_id, usage.processed)
});
};
const withEndTimestamp = (timestamp) => (usage) => usage.end = timestamp;
const withStartTimestamp = (timestamp) => (usage) => usage.start = timestamp;
const withProcessedTimestamp = (timestamp) => (usage) => usage.processed = timestamp;
const withBlueprint = (blueprint) => (usage) => extend(usage, blueprint);
const withDefaultBlueprint = () => (usage) => extend(usage, _commonBlueprint);
const withResourceInstanceId = (resourceInstanceId) => (usage) => usage.resource_instance_id = resourceInstanceId;
const withAccumulatedUsage = (accumulatedUsage) => (usage) => usage.accumulated_usage = accumulatedUsage;
const buildAccumulatedUsage = (...builders) => {
const accumulatedUsage = { windows: [[null], [null], [null], [null, null, null, null, null, null], [null, null]] };
for(let builder of builders)
builder(accumulatedUsage);
return accumulatedUsage;
};
const withMetricName = (metricName) => (accumulatedUsage) => accumulatedUsage.metric = metricName;
const withCurrentDayQuantity = (quantity) => (accumulatedUsage) =>
accumulatedUsage.windows[3][0] = { quantity: quantity };
const withPreviousDayQuantity = (quantity) => (accumulatedUsage) =>
accumulatedUsage.windows[3][1] = { quantity: quantity };
const withCurrentMonthQuantity = (quantity) => (accumulatedUsage) =>
accumulatedUsage.windows[4][0] = { quantity: quantity };
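// Illustrative usage (hypothetical values): compose the builders to create a
// test usage document with the default blueprint and one accumulated metric.
//
// const usage = buildUsage(
//   withDefaultBlueprint(),
//   withResourceInstanceId('0b39fa70-a65f-4183-bae8-385633ca5c87'),
//   withStartTimestamp(1420243200000),
//   withEndTimestamp(1420245000000),
//   withProcessedTimestamp(1420245000000),
//   withAccumulatedUsage([buildAccumulatedUsage(
//     withMetricName('heavy_api_calls'),
//     withCurrentMonthQuantity(100))]));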
module.exports = {
buildUsage, withEndTimestamp, withStartTimestamp, withProcessedTimestamp, withBlueprint, withDefaultBlueprint,
withResourceInstanceId, withAccumulatedUsage, buildAccumulatedUsage, withMetricName, withCurrentDayQuantity,
withCurrentMonthQuantity, withPreviousDayQuantity
};
| cloudfoundry-incubator/cf-abacus | lib/aggregation/aggregator/src/test/usage-builder.js | JavaScript | apache-2.0 | 2,638 |
package com.rbmhtechnology.eventuate.chaos
import akka.actor.ActorSystem
import akka.actor.Props
import akka.pattern.BackoffSupervisor
import com.rbmhtechnology.eventuate.ReplicationConnection
import com.rbmhtechnology.eventuate.ReplicationEndpoint
import com.typesafe.config.ConfigFactory
import scala.concurrent.duration.DurationInt
trait ChaosSetup extends App {
def getSystem: ActorSystem
def getEndpoint(implicit system: ActorSystem): ReplicationEndpoint
protected def baseConfig(hostname: String) = ConfigFactory.parseString(
s"""
|akka.actor.provider = "akka.remote.RemoteActorRefProvider"
|akka.remote.enabled-transports = ["akka.remote.netty.tcp"]
|akka.remote.netty.tcp.hostname = "$hostname"
|akka.remote.netty.tcp.port = 2552
|akka.test.single-expect-default = 10s
|akka.loglevel = "INFO"
|eventuate.log.write-batch-size = 16
|eventuate.log.read-timeout = 3s
|eventuate.log.retry-delay = 3s
|akka.remote.netty.tcp.maximum-frame-size = 1024000b
""".stripMargin)
protected def quote(str: String) = "\"" + str + "\""
  /** Starts the actor watched by a `BackoffSupervisor`. */
protected def supervised(props: Props, name: String): Props =
BackoffSupervisor.props(props, name, 1.second, 30.seconds, 0.1)
def name = {
if (args == null || args.length < 1) {
Console.err.println("no <nodename> specified")
sys.exit(1)
} else {
args(0)
}
}
def hostname = sys.env.getOrElse("HOSTNAME", s"$name.eventuate-chaos.docker")
// replication connection to other node(s)
def connections = args.drop(1).map { conn =>
conn.split(":") match {
case Array(host, port) =>
ReplicationConnection(host, port.toInt)
case Array(host) =>
ReplicationConnection(host, 2552)
}
}.toSet
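  // Illustrative sketch (hypothetical, not part of the original sources): a
  // concrete chaos node mixes in this trait and supplies the two abstract
  // members, e.g.
  //
  //   object ChaosNode extends ChaosSetup {
  //     def getSystem: ActorSystem = ActorSystem(name, baseConfig(hostname))
  //     def getEndpoint(implicit system: ActorSystem): ReplicationEndpoint = ???
  //   }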
}
| RBMHTechnology/eventuate-chaos | src/main/scala/com/rbmhtechnology/eventuate/chaos/ChaosSetup.scala | Scala | apache-2.0 | 1,847 |
package me.tatarka.timesync.lib;
import android.content.Context;
import java.util.Arrays;
/**
 * A class for interacting with a {@link TimeSync}. You can get and set its configuration, and
 * force it to sync immediately. To get an instance of this class for a given {@link TimeSync}, use
 * {@link TimeSync#get(android.content.Context, Class)}.
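 * <p>
 * Illustrative usage (assuming a hypothetical {@code WeatherSync} subclass of
 * {@link TimeSync}):
 * <pre>{@code
 * TimeSyncProxy weatherSync = TimeSync.get(context, WeatherSync.class);
 * weatherSync.sync();        // sync immediately, e.g. in response to a user action
 * weatherSync.syncInexact(); // or sync in the near future, randomized per device
 * }</pre>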
*/
public final class TimeSyncProxy {
private Context context;
private String name;
private TimeSync listener;
TimeSyncProxy(Context context, String name) {
this.context = context;
this.name = name;
listener = TimeSyncParser.parseListeners(context).get(name);
}
/**
     * Syncs immediately. This is useful in response to a user action. Use this sparingly, as
* frequent syncs defeat the purpose of using this library.
*/
public void sync() {
TimeSyncService.sync(context, name);
}
/**
* Syncs sometime in the near future, randomizing per device. This is useful in response to a
* server message, using GCM for example, so that the server is not overwhelmed with all devices
* trying to sync at once.
*/
public void syncInexact() {
TimeSyncService.syncInexact(context, name);
}
/**
* Gets the current configuration for the {@link TimeSync}.
*
* @return the configuration
* @see TimeSync.Config
*/
public TimeSync.Config config() {
return listener.config();
}
/**
* Modifies the current configuration for the {@link TimeSync}.
*
* @param edits the edits
* @see TimeSync#edit(TimeSync.Edit...)
*/
public void edit(Iterable<TimeSync.Edit> edits) {
listener.edit(edits);
TimeSyncService.update(context, name);
}
/**
* Modifies the current configuration for the {@link TimeSync}.
*
* @param edits the edits
* @see TimeSync#edit(TimeSync.Edit...)
*/
public void edit(TimeSync.Edit... edits) {
edit(Arrays.asList(edits));
}
}
| evant/timesync | lib/src/main/java/me/tatarka/timesync/lib/TimeSyncProxy.java | Java | apache-2.0 | 2,035 |
#
# Copyright 2021 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package database::oracle::mode::dictionarycacheusage;
use base qw(centreon::plugins::templates::counter);
use strict;
use warnings;
use Digest::MD5 qw(md5_hex);
sub custom_hitratio_calc {
my ($self, %options) = @_;
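    # Note: despite its name, the 'getmisses' key holds SUM(gets - getmisses),
    # i.e. the number of cache hits (see the query in manage_selection), so the
    # ratio computed below is hits / gets over the sampling interval.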
my $delta_total = ($options{new_datas}->{$self->{instance} . '_gets'} - $options{old_datas}->{$self->{instance} . '_gets'});
my $delta_cache = ($options{new_datas}->{$self->{instance} . '_getmisses'} - $options{old_datas}->{$self->{instance} . '_getmisses'});
$self->{result_values}->{hit_ratio} = $delta_total ? (100 * $delta_cache / $delta_total) : 0;
return 0;
}
sub set_counters {
my ($self, %options) = @_;
$self->{maps_counters_type} = [
{ name => 'global', cb_prefix_output => 'prefix_global_output', type => 0 },
];
$self->{maps_counters}->{global} = [
{ label => 'get-hits', nlabel => 'dictionary.cache.get.hitratio.percentage', set => {
key_values => [ { name => 'getmisses', diff => 1 }, { name => 'gets', diff => 1 } ],
closure_custom_calc => $self->can('custom_hitratio_calc'),
output_template => 'get hit ratio %.2f%%',
output_use => 'hit_ratio', threshold_use => 'hit_ratio',
perfdatas => [
{ label => 'get_hit_ratio', value => 'hit_ratio', template => '%.2f', min => 0, max => 100, unit => '%' },
],
}
},
];
}
sub prefix_global_output {
my ($self, %options) = @_;
return 'SGA dictionary cache ';
}
sub new {
my ($class, %options) = @_;
my $self = $class->SUPER::new(package => __PACKAGE__, %options, statefile => 1, force_new_perfdata => 1);
bless $self, $class;
$options{options}->add_options(arguments => {
});
return $self;
}
sub manage_selection {
my ($self, %options) = @_;
my $query = q{
SELECT SUM(gets), SUM(gets-getmisses) FROM v$rowcache
};
$options{sql}->connect();
$options{sql}->query(query => $query);
my @result = $options{sql}->fetchrow_array();
$options{sql}->disconnect();
$self->{global} = {
gets => $result[0],
getmisses => $result[1],
};
$self->{cache_name} = "oracle_" . $self->{mode} . '_' . $options{sql}->get_unique_id4save() . '_' .
(defined($self->{option_results}->{filter_counters}) ? md5_hex($self->{option_results}->{filter_counters}) : md5_hex('all'));
}
1;
__END__
=head1 MODE
Check Oracle dictionary cache usage.
=over 8
=item B<--warning-*> B<--critical-*>
Thresholds.
Can be: 'get-hits'.
=back
=cut
| Tpo76/centreon-plugins | database/oracle/mode/dictionarycacheusage.pm | Perl | apache-2.0 | 3,358 |
#
# Copyright 2021 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package network::cisco::wlc::snmp::plugin;
use strict;
use warnings;
use base qw(centreon::plugins::script_snmp);
sub new {
my ($class, %options) = @_;
my $self = $class->SUPER::new(package => __PACKAGE__, %options);
bless $self, $class;
$self->{version} = '1.0';
$self->{modes} = {
'ap-channel-interference' => 'centreon::common::airespace::snmp::mode::apchannelinterference',
'ap-channel-noise' => 'centreon::common::airespace::snmp::mode::apchannelnoise',
'ap-status' => 'centreon::common::airespace::snmp::mode::apstatus',
'ap-users' => 'centreon::common::airespace::snmp::mode::apusers',
'cpu' => 'centreon::common::airespace::snmp::mode::cpu',
'discovery' => 'centreon::common::airespace::snmp::mode::discovery',
'hardware' => 'centreon::common::airespace::snmp::mode::hardware',
'interfaces' => 'snmp_standard::mode::interfaces',
'list-groups' => 'centreon::common::airespace::snmp::mode::listgroups',
'list-interfaces' => 'snmp_standard::mode::listinterfaces',
'list-radius-acc-servers' => 'centreon::common::airespace::snmp::mode::listradiusaccservers',
'list-radius-auth-servers' => 'centreon::common::airespace::snmp::mode::listradiusauthservers',
'memory' => 'centreon::common::airespace::snmp::mode::memory',
'radius-acc-servers' => 'centreon::common::airespace::snmp::mode::radiusaccservers',
'radius-auth-servers' => 'centreon::common::airespace::snmp::mode::radiusauthservers'
};
return $self;
}
1;
__END__
=head1 PLUGIN DESCRIPTION
Check Cisco Wireless Lan Controller in SNMP.
=cut
| Tpo76/centreon-plugins | network/cisco/wlc/snmp/plugin.pm | Perl | apache-2.0 | 2,575 |
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<!-- NewPage -->
<html lang="de">
<head>
<!-- Generated by javadoc (version 1.7.0_17) on Tue May 14 03:45:03 CEST 2013 -->
<title>com.badlogic.gdx.maps (libgdx API)</title>
<meta name="date" content="2013-05-14">
<link rel="stylesheet" type="text/css" href="../../../../stylesheet.css" title="Style">
</head>
<body>
<h1 class="bar"><a href="../../../../com/badlogic/gdx/maps/package-summary.html" target="classFrame">com.badlogic.gdx.maps</a></h1>
<div class="indexContainer">
<h2 title="Interfaces">Interfaces</h2>
<ul title="Interfaces">
<li><a href="ImageResolver.html" title="interface in com.badlogic.gdx.maps" target="classFrame"><i>ImageResolver</i></a></li>
<li><a href="MapRenderer.html" title="interface in com.badlogic.gdx.maps" target="classFrame"><i>MapRenderer</i></a></li>
</ul>
<h2 title="Classes">Classes</h2>
<ul title="Classes">
<li><a href="ImageResolver.AssetManagerImageResolver.html" title="class in com.badlogic.gdx.maps" target="classFrame">ImageResolver.AssetManagerImageResolver</a></li>
<li><a href="ImageResolver.DirectImageResolver.html" title="class in com.badlogic.gdx.maps" target="classFrame">ImageResolver.DirectImageResolver</a></li>
<li><a href="ImageResolver.TextureAtlasImageResolver.html" title="class in com.badlogic.gdx.maps" target="classFrame">ImageResolver.TextureAtlasImageResolver</a></li>
<li><a href="Map.html" title="class in com.badlogic.gdx.maps" target="classFrame">Map</a></li>
<li><a href="MapLayer.html" title="class in com.badlogic.gdx.maps" target="classFrame">MapLayer</a></li>
<li><a href="MapLayers.html" title="class in com.badlogic.gdx.maps" target="classFrame">MapLayers</a></li>
<li><a href="MapObject.html" title="class in com.badlogic.gdx.maps" target="classFrame">MapObject</a></li>
<li><a href="MapObjects.html" title="class in com.badlogic.gdx.maps" target="classFrame">MapObjects</a></li>
<li><a href="MapProperties.html" title="class in com.badlogic.gdx.maps" target="classFrame">MapProperties</a></li>
</ul>
</div>
</body>
</html>
| leszekuchacz/Leszek-Uchacz | docs/api/com/badlogic/gdx/maps/package-frame.html | HTML | apache-2.0 | 2,107 |
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<!-- NewPage -->
<html lang="en">
<head>
<!-- Generated by javadoc (1.8.0_101) on Wed Dec 14 17:18:32 GMT 2016 -->
<title>API Help (ExoPlayer library)</title>
<meta name="date" content="2016-12-14">
<link rel="stylesheet" type="text/css" href="stylesheet.css" title="Style">
<script type="text/javascript" src="script.js"></script>
</head>
<body>
<script type="text/javascript"><!--
try {
if (location.href.indexOf('is-external=true') == -1) {
parent.document.title="API Help (ExoPlayer library)";
}
}
catch(err) {
}
//-->
</script>
<noscript>
<div>JavaScript is disabled on your browser.</div>
</noscript>
<!-- ========= START OF TOP NAVBAR ======= -->
<div class="topNav"><a name="navbar.top">
<!-- -->
</a>
<div class="skipNav"><a href="#skip.navbar.top" title="Skip navigation links">Skip navigation links</a></div>
<a name="navbar.top.firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="overview-summary.html">Overview</a></li>
<li>Package</li>
<li>Class</li>
<li><a href="overview-tree.html">Tree</a></li>
<li><a href="deprecated-list.html">Deprecated</a></li>
<li><a href="index-all.html">Index</a></li>
<li class="navBarCell1Rev">Help</li>
</ul>
</div>
<div class="subNav">
<ul class="navList">
<li>Prev</li>
<li>Next</li>
</ul>
<ul class="navList">
<li><a href="index.html?help-doc.html" target="_top">Frames</a></li>
<li><a href="help-doc.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_top">
<li><a href="allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_top");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<a name="skip.navbar.top">
<!-- -->
</a></div>
<!-- ========= END OF TOP NAVBAR ========= -->
<div class="header">
<h1 class="title">How This API Document Is Organized</h1>
<div class="subTitle">This API (Application Programming Interface) document has pages corresponding to the items in the navigation bar, described as follows.</div>
</div>
<div class="contentContainer">
<ul class="blockList">
<li class="blockList">
<h2>Overview</h2>
<p>The <a href="overview-summary.html">Overview</a> page is the front page of this API document and provides a list of all packages with a summary for each. This page can also contain an overall description of the set of packages.</p>
</li>
<li class="blockList">
<h2>Package</h2>
<p>Each package has a page that contains a list of its classes and interfaces, with a summary for each. This page can contain six categories:</p>
<ul>
<li>Interfaces (italic)</li>
<li>Classes</li>
<li>Enums</li>
<li>Exceptions</li>
<li>Errors</li>
<li>Annotation Types</li>
</ul>
</li>
<li class="blockList">
<h2>Class/Interface</h2>
<p>Each class, interface, nested class and nested interface has its own separate page. Each of these pages has three sections consisting of a class/interface description, summary tables, and detailed member descriptions:</p>
<ul>
<li>Class inheritance diagram</li>
<li>Direct Subclasses</li>
<li>All Known Subinterfaces</li>
<li>All Known Implementing Classes</li>
<li>Class/interface declaration</li>
<li>Class/interface description</li>
</ul>
<ul>
<li>Nested Class Summary</li>
<li>Field Summary</li>
<li>Constructor Summary</li>
<li>Method Summary</li>
</ul>
<ul>
<li>Field Detail</li>
<li>Constructor Detail</li>
<li>Method Detail</li>
</ul>
<p>Each summary entry contains the first sentence from the detailed description for that item. The summary entries are alphabetical, while the detailed descriptions are in the order they appear in the source code. This preserves the logical groupings established by the programmer.</p>
</li>
<li class="blockList">
<h2>Annotation Type</h2>
<p>Each annotation type has its own separate page with the following sections:</p>
<ul>
<li>Annotation Type declaration</li>
<li>Annotation Type description</li>
<li>Required Element Summary</li>
<li>Optional Element Summary</li>
<li>Element Detail</li>
</ul>
</li>
<li class="blockList">
<h2>Enum</h2>
<p>Each enum has its own separate page with the following sections:</p>
<ul>
<li>Enum declaration</li>
<li>Enum description</li>
<li>Enum Constant Summary</li>
<li>Enum Constant Detail</li>
</ul>
</li>
<li class="blockList">
<h2>Tree (Class Hierarchy)</h2>
<p>There is a <a href="overview-tree.html">Class Hierarchy</a> page for all packages, plus a hierarchy for each package. Each hierarchy page contains a list of classes and a list of interfaces. The classes are organized by inheritance structure starting with <code>java.lang.Object</code>. The interfaces do not inherit from <code>java.lang.Object</code>.</p>
<ul>
<li>When viewing the Overview page, clicking on "Tree" displays the hierarchy for all packages.</li>
<li>When viewing a particular package, class or interface page, clicking "Tree" displays the hierarchy for only that package.</li>
</ul>
</li>
<li class="blockList">
<h2>Deprecated API</h2>
<p>The <a href="deprecated-list.html">Deprecated API</a> page lists all of the API that have been deprecated. A deprecated API is not recommended for use, generally due to improvements, and a replacement API is usually given. Deprecated APIs may be removed in future implementations.</p>
</li>
<li class="blockList">
<h2>Index</h2>
<p>The <a href="index-all.html">Index</a> contains an alphabetic list of all classes, interfaces, constructors, methods, and fields.</p>
</li>
<li class="blockList">
<h2>Prev/Next</h2>
<p>These links take you to the next or previous class, interface, package, or related page.</p>
</li>
<li class="blockList">
<h2>Frames/No Frames</h2>
<p>These links show and hide the HTML frames. All pages are available with or without frames.</p>
</li>
<li class="blockList">
<h2>All Classes</h2>
<p>The <a href="allclasses-noframe.html">All Classes</a> link shows all classes and interfaces except non-static nested types.</p>
</li>
<li class="blockList">
<h2>Serialized Form</h2>
<p>Each serializable or externalizable class has a description of its serialization fields and methods. This information is of interest to re-implementors, not to developers using the API. While there is no link in the navigation bar, you can get to this information by going to any serialized class and clicking "Serialized Form" in the "See also" section of the class description.</p>
</li>
<li class="blockList">
<h2>Constant Field Values</h2>
<p>The <a href="constant-values.html">Constant Field Values</a> page lists the static final fields and their values.</p>
</li>
</ul>
<span class="emphasizedPhrase">This help file applies to API documentation generated using the standard doclet.</span></div>
<!-- ======= START OF BOTTOM NAVBAR ====== -->
<div class="bottomNav"><a name="navbar.bottom">
<!-- -->
</a>
<div class="skipNav"><a href="#skip.navbar.bottom" title="Skip navigation links">Skip navigation links</a></div>
<a name="navbar.bottom.firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="overview-summary.html">Overview</a></li>
<li>Package</li>
<li>Class</li>
<li><a href="overview-tree.html">Tree</a></li>
<li><a href="deprecated-list.html">Deprecated</a></li>
<li><a href="index-all.html">Index</a></li>
<li class="navBarCell1Rev">Help</li>
</ul>
</div>
<div class="subNav">
<ul class="navList">
<li>Prev</li>
<li>Next</li>
</ul>
<ul class="navList">
<li><a href="index.html?help-doc.html" target="_top">Frames</a></li>
<li><a href="help-doc.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_bottom">
<li><a href="allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_bottom");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<a name="skip.navbar.bottom">
<!-- -->
</a></div>
<!-- ======== END OF BOTTOM NAVBAR ======= -->
</body>
</html>
| amzn/exoplayer-amazon-port | docs/doc/reference-v1/help-doc.html | HTML | apache-2.0 | 8,253 |
# Sniper
**Natural evolution of:** Engineer, Ranger.
## Rank 1
### Targeting
> You designate a target; even when that target hides, you can still spot it with a luck test.
### Scattershot
Z: ARM + Ag -2
> Attack in an arc in front of the player; hits every enemy inside the arc.
> Uses 1 projectile per enemy.
### Immobilization
AD:TA-[Ag], ARM + Ag + 2, immobilized if damage > 0
> Aims at the ankle
## Rank 2
### Point-Blank
CAC:ARM + Fo
> Melee attack performed with a ranged weapon.
> Ag is not counted if it is part of ARM.
### Arcing Shot
AD:TA-[Ag, Int], ARM - 1d6 + Ag + Fo
## Rank 3
### Phantom Projectile
AD:ARM + Ag - 1d8 - 2, unblockable.
### Knockback
AD:TA-[Fo, Ag], ARM + Ag + Fo - 1d8
> knocks the target back if the parry fails
### Head-Shot
AD:TS-[Ag], ARM + 1d12 + Fo + Ag
> Automatically strikes square in the head.
| redSpoutnik/Sidh-Horizons | Arbres_competences/Sniper.md | Markdown | apache-2.0 | 924 |
package semver
import (
"fmt"
"strings"
"github.com/blang/semver"
"github.com/pivotal-cf/go-pivnet/v7/logger"
)
type SemverConverter struct {
logger logger.Logger
}
func NewSemverConverter(logger logger.Logger) *SemverConverter {
return &SemverConverter{logger}
}
// ToValidSemver attempts to return the input as valid semver.
// If the input fails to parse as semver, it appends .0 or .0.0 to the input and retries
// If this is still not valid semver, it returns an error
func (s SemverConverter) ToValidSemver(input string) (semver.Version, error) {
v, err := semver.Parse(input)
if err == nil {
return v, nil
}
s.logger.Info(fmt.Sprintf(
"failed to parse semver: '%s', appending zeros and trying again",
input,
))
maybeSemver := input
segs := strings.SplitN(maybeSemver, ".", 3)
switch len(segs) {
case 2:
maybeSemver += ".0"
case 1:
maybeSemver += ".0.0"
}
v, err = semver.Parse(maybeSemver)
if err == nil {
return v, nil
}
s.logger.Info(fmt.Sprintf(
"still failed to parse semver: '%s', giving up",
maybeSemver,
))
return semver.Version{}, err
}
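
// Illustrative usage (hypothetical logger; not part of the original file):
//
//	c := NewSemverConverter(log) // log is any implementation of logger.Logger
//	v, _ := c.ToValidSemver("2.1")   // yields 2.1.0
//	v, _ = c.ToValidSemver("7")      // yields 7.0.0
//	_, err := c.ToValidSemver("abc") // still not semver -> returns an error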
| pivotal-cf-experimental/pivnet-resource | semver/semver.go | GO | apache-2.0 | 1,101 |
# Default node behavior
The default behavior is implemented by the intermediate node types such as `SoftwareComponent`. This behavior looks at the standard lifecycle operations.
If all of them are present, they are added to the Dockerfile in the following order:
1. `create`
2. `configure`
3. `start`
For each phase, the scripts and their dependencies are copied into the Dockerfile,
the corresponding properties are set as environment variables,
and the command is executed (`create` and `configure` only).
The `start` script does not get executed during the building process. It gets copied just like all other scripts.
Environment variables also get set normally, however the script will not get executed using a `RUN` command.
Instead, it will be added to the entrypoint list that is responsible to run the `start` commands once the container gets created.
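To make the ordering concrete, the generated section for a single SoftwareComponent might look roughly like the following sketch; the script names, paths, and the `DB_HOST` property are illustrative assumptions, not the output of a real transformation:

```dockerfile
# Lifecycle scripts and their dependencies are copied in (illustrative paths)
COPY create.sh configure.sh start.sh /opt/app/

# Node properties become environment variables (DB_HOST is a made-up example)
ENV DB_HOST=localhost

# create and configure run while the image is built
RUN sh /opt/app/create.sh
RUN sh /opt/app/configure.sh

# start is only registered here; it runs once the container is created
ENTRYPOINT ["sh", "/opt/app/start.sh"]
```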
# Custom node behavior
**NOTE**: All current custom node implementations only work this way if they do not feature a custom standard lifecycle.
If they have one, the default behavior applies instead.
## Apache
We assume that Apache always comes with PHP; that is why we use the `library/php:httpd` image.
Furthermore, we expect all child nodes (WebApplications) to have a create or configure script that copies their contents into the `/var/www` folder.
These scripts are executed as the root user.
## MySQL (including Database)
MySQL defaults to the `library/mysql:latest` image. The predefined properties (such as the root password) are taken and set as configuration environment variables.
If a child database contains a `.sql` artifact, the file is copied into a special directory whose contents are executed when the container starts.
## Java Runtime and Application
The Java runtime and application types use the `library/openjdk` image by default. The JAR defined in the JavaApplication node template is copied into the Dockerfile (including its dependencies).
A `java -jar <JAR_FILE>` command is triggered to launch the application.
| StuPro-TOSCAna/TOSCAna | docs/dev/plugins/kubernetes/building-dockerfiles.md | Markdown | apache-2.0 | 2,030 |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import mock
import six
import yaml
from heat.common import config
from heat.common import exception
from heat.common import template_format
from heat.tests.common import HeatTestCase
from heat.tests import utils
class JsonToYamlTest(HeatTestCase):
def setUp(self):
super(JsonToYamlTest, self).setUp()
self.expected_test_count = 2
self.longMessage = True
self.maxDiff = None
def test_convert_all_templates(self):
path = os.path.join(os.path.dirname(os.path.realpath(__file__)),
'templates')
template_test_count = 0
for (json_str,
yml_str,
file_name) in self.convert_all_json_to_yaml(path):
self.compare_json_vs_yaml(json_str, yml_str, file_name)
template_test_count += 1
if template_test_count >= self.expected_test_count:
break
self.assertTrue(template_test_count >= self.expected_test_count,
'Expected at least %d templates to be tested, not %d' %
(self.expected_test_count, template_test_count))
def compare_json_vs_yaml(self, json_str, yml_str, file_name):
yml = template_format.parse(yml_str)
self.assertEqual(u'2012-12-12', yml[u'HeatTemplateFormatVersion'],
file_name)
self.assertFalse(u'AWSTemplateFormatVersion' in yml, file_name)
del(yml[u'HeatTemplateFormatVersion'])
jsn = template_format.parse(json_str)
if u'AWSTemplateFormatVersion' in jsn:
del(jsn[u'AWSTemplateFormatVersion'])
self.assertEqual(yml, jsn, file_name)
def convert_all_json_to_yaml(self, dirpath):
for path in os.listdir(dirpath):
if not path.endswith('.template') and not path.endswith('.json'):
continue
f = open(os.path.join(dirpath, path), 'r')
json_str = f.read()
yml_str = template_format.convert_json_to_yaml(json_str)
yield (json_str, yml_str, f.name)
class YamlMinimalTest(HeatTestCase):
def _parse_template(self, tmpl_str, msg_str):
parse_ex = self.assertRaises(ValueError,
template_format.parse,
tmpl_str)
self.assertIn(msg_str, six.text_type(parse_ex))
def test_long_yaml(self):
template = {'HeatTemplateFormatVersion': '2012-12-12'}
config.cfg.CONF.set_override('max_template_size', 1024)
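        # Each 'a' entry serializes to a few bytes of YAML, so a list of
        # max_template_size / 3 items is guaranteed to exceed the 1024-byte
        # limit configured above.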
        template['Resources'] = ['a'] * (config.cfg.CONF.max_template_size // 3)
limit = config.cfg.CONF.max_template_size
long_yaml = yaml.safe_dump(template)
self.assertTrue(len(long_yaml) > limit)
ex = self.assertRaises(exception.RequestLimitExceeded,
template_format.parse, long_yaml)
msg = ('Request limit exceeded: Template exceeds maximum allowed size '
'(1024 bytes)')
self.assertEqual(msg, six.text_type(ex))
def test_parse_no_version_format(self):
yaml = ''
self._parse_template(yaml, 'Template format version not found')
yaml2 = '''Parameters: {}
Mappings: {}
Resources: {}
Outputs: {}
'''
self._parse_template(yaml2, 'Template format version not found')
def test_parse_string_template(self):
tmpl_str = 'just string'
msg = 'The template is not a JSON object or YAML mapping.'
self._parse_template(tmpl_str, msg)
def test_parse_invalid_yaml_and_json_template(self):
tmpl_str = '{test'
msg = 'line 1, column 1'
self._parse_template(tmpl_str, msg)
def test_parse_json_document(self):
tmpl_str = '["foo" , "bar"]'
msg = 'The template is not a JSON object or YAML mapping.'
self._parse_template(tmpl_str, msg)
def test_parse_empty_json_template(self):
tmpl_str = '{}'
msg = 'Template format version not found'
self._parse_template(tmpl_str, msg)
def test_parse_yaml_template(self):
tmpl_str = 'heat_template_version: 2013-05-23'
expected = {'heat_template_version': '2013-05-23'}
self.assertEqual(expected, template_format.parse(tmpl_str))
class YamlParseExceptions(HeatTestCase):
scenarios = [
('scanner', dict(raised_exception=yaml.scanner.ScannerError())),
('parser', dict(raised_exception=yaml.parser.ParserError())),
('reader',
dict(raised_exception=yaml.reader.ReaderError('', '', '', '', ''))),
]
def test_parse_to_value_exception(self):
text = 'not important'
with mock.patch.object(yaml, 'load') as yaml_loader:
yaml_loader.side_effect = self.raised_exception
self.assertRaises(ValueError,
template_format.parse, text)
class JsonYamlResolvedCompareTest(HeatTestCase):
def setUp(self):
super(JsonYamlResolvedCompareTest, self).setUp()
self.longMessage = True
self.maxDiff = None
def load_template(self, file_name):
filepath = os.path.join(os.path.dirname(os.path.realpath(__file__)),
'templates', file_name)
f = open(filepath)
t = template_format.parse(f.read())
f.close()
return t
def compare_stacks(self, json_file, yaml_file, parameters):
t1 = self.load_template(json_file)
t2 = self.load_template(yaml_file)
del(t1[u'AWSTemplateFormatVersion'])
t1[u'HeatTemplateFormatVersion'] = t2[u'HeatTemplateFormatVersion']
stack1 = utils.parse_stack(t1, parameters)
stack2 = utils.parse_stack(t2, parameters)
# compare resources separately so that resolved static data
# is compared
t1nr = dict(stack1.t.t)
del(t1nr['Resources'])
t2nr = dict(stack2.t.t)
del(t2nr['Resources'])
self.assertEqual(t1nr, t2nr)
self.assertEqual(set(stack1.keys()), set(stack2.keys()))
for key in stack1:
self.assertEqual(stack1[key].t, stack2[key].t)
def test_neutron_resolved(self):
self.compare_stacks('Neutron.template', 'Neutron.yaml', {})
def test_wordpress_resolved(self):
self.compare_stacks('WordPress_Single_Instance.template',
'WordPress_Single_Instance.yaml',
{'KeyName': 'test'})
| redhat-openstack/heat | heat/tests/test_template_format.py | Python | apache-2.0 | 7,015 |
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<!-- NewPage -->
<html lang="fr">
<head>
<!-- Generated by javadoc (version 1.7.0_04) on Fri Mar 15 01:08:46 CET 2013 -->
<title>U-Index</title>
<meta name="date" content="2013-03-15">
<link rel="stylesheet" type="text/css" href="../stylesheet.css" title="Style">
</head>
<body>
<script type="text/javascript"><!--
if (location.href.indexOf('is-external=true') == -1) {
parent.document.title="U-Index";
}
//-->
</script>
<noscript>
<div>JavaScript is disabled on your browser.</div>
</noscript>
<!-- ========= START OF TOP NAVBAR ======= -->
<div class="topNav"><a name="navbar_top">
<!-- -->
</a><a href="#skip-navbar_top" title="Skip navigation links"></a><a name="navbar_top_firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../fr/ups/djapi/package-summary.html">Package</a></li>
<li>Class</li>
<li>Use</li>
<li><a href="../fr/ups/djapi/package-tree.html">Tree</a></li>
<li><a href="../deprecated-list.html">Deprecated</a></li>
<li class="navBarCell1Rev">Index</li>
<li><a href="../help-doc.html">Help</a></li>
</ul>
</div>
<div class="subNav">
<ul class="navList">
<li><a href="index-11.html">Prev Letter</a></li>
<li>Next Letter</li>
</ul>
<ul class="navList">
<li><a href="../index.html?index-filesindex-12.html" target="_top">Frames</a></li>
<li><a href="index-12.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_top">
<li><a href="../allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_top");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<a name="skip-navbar_top">
<!-- -->
</a></div>
<!-- ========= END OF TOP NAVBAR ========= -->
<div class="contentContainer"><a href="index-1.html">C</a> <a href="index-2.html">D</a> <a href="index-3.html">E</a> <a href="index-4.html">F</a> <a href="index-5.html">G</a> <a href="index-6.html">I</a> <a href="index-7.html">L</a> <a href="index-8.html">N</a> <a href="index-9.html">P</a> <a href="index-10.html">R</a> <a href="index-11.html">S</a> <a href="index-12.html">U</a> <a name="_U_">
<!-- -->
</a>
<h2 class="title">U</h2>
<dl>
<dt><span class="strong"><a href="../fr/ups/djapi/DJAPIConfiguration.html#url">url</a></span> - Variable in class fr.ups.djapi.<a href="../fr/ups/djapi/DJAPIConfiguration.html" title="class in fr.ups.djapi">DJAPIConfiguration</a></dt>
<dd> </dd>
</dl>
<a href="index-1.html">C</a> <a href="index-2.html">D</a> <a href="index-3.html">E</a> <a href="index-4.html">F</a> <a href="index-5.html">G</a> <a href="index-6.html">I</a> <a href="index-7.html">L</a> <a href="index-8.html">N</a> <a href="index-9.html">P</a> <a href="index-10.html">R</a> <a href="index-11.html">S</a> <a href="index-12.html">U</a> </div>
<!-- ======= START OF BOTTOM NAVBAR ====== -->
<div class="bottomNav"><a name="navbar_bottom">
<!-- -->
</a><a href="#skip-navbar_bottom" title="Skip navigation links"></a><a name="navbar_bottom_firstrow">
<!-- -->
</a>
<ul class="navList" title="Navigation">
<li><a href="../fr/ups/djapi/package-summary.html">Package</a></li>
<li>Class</li>
<li>Use</li>
<li><a href="../fr/ups/djapi/package-tree.html">Tree</a></li>
<li><a href="../deprecated-list.html">Deprecated</a></li>
<li class="navBarCell1Rev">Index</li>
<li><a href="../help-doc.html">Help</a></li>
</ul>
</div>
<div class="subNav">
<ul class="navList">
<li><a href="index-11.html">Prev Letter</a></li>
<li>Next Letter</li>
</ul>
<ul class="navList">
<li><a href="../index.html?index-filesindex-12.html" target="_top">Frames</a></li>
<li><a href="index-12.html" target="_top">No Frames</a></li>
</ul>
<ul class="navList" id="allclasses_navbar_bottom">
<li><a href="../allclasses-noframe.html">All Classes</a></li>
</ul>
<div>
<script type="text/javascript"><!--
allClassesLink = document.getElementById("allclasses_navbar_bottom");
if(window==top) {
allClassesLink.style.display = "block";
}
else {
allClassesLink.style.display = "none";
}
//-->
</script>
</div>
<a name="skip-navbar_bottom">
<!-- -->
</a></div>
<!-- ======== END OF BOTTOM NAVBAR ======= -->
</body>
</html>
| Ornro/DJAPI | doc/index-files/index-12.html | HTML | apache-2.0 | 4,478 |
/**
* jetbrick-template
* http://subchen.github.io/jetbrick-template/
*
* Copyright 2010-2014 Guoqiang Chen. All rights reserved.
* Email: [email protected]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package jetbrick.template.resource;
import java.io.IOException;
import java.io.InputStream;
import java.util.concurrent.atomic.AtomicLong;
import jetbrick.template.utils.UnsafeByteArrayInputStream;
/**
 * A resource that exists in source-code form.
*
* @since 1.1.3
* @author Guoqiang Chen
*/
public class SourceCodeResource extends Resource {
private static final String ENCODING = "utf-8";
private static AtomicLong index = new AtomicLong();
private final String source;
public SourceCodeResource(String source) {
super("/unknown/file." + index.incrementAndGet(), ENCODING);
this.source = source;
}
@Override
public String getAbsolutePath() {
return "(unknown)";
}
@Override
public long lastModified() {
return 0;
}
@Override
public InputStream getInputStream() throws IOException {
return new UnsafeByteArrayInputStream(source.getBytes(ENCODING));
}
@Override
public char[] getSource() {
return source.toCharArray();
}
@Override
public char[] getSource(String encoding) {
return source.toCharArray();
}
}
| subchen/jetbrick-template-1x | src/main/java/jetbrick/template/resource/SourceCodeResource.java | Java | apache-2.0 | 1,866 |
# Copyright 2015 - StackStorm, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from oslo_config import cfg
import requests
from mistral.actions import std_actions
from mistral.db.v2 import api as db_api
from mistral.services import workflows as wf_service
from mistral.tests.unit import base as test_base
from mistral.tests.unit.engine import base
from mistral.workflow import states
# Use the set_default method to set the value; otherwise, in certain test
# cases the change in value is not permanent.
cfg.CONF.set_default('auth_enable', False, group='pecan')
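# Environment passed to the workflow executions below. The '__actions' section
# supplies default parameter values (auth, timeout) for every std.http call.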
ENV = {
'__actions': {
'std.http': {
'auth': 'librarian:password123',
'timeout': 30,
}
}
}
EXPECTED_ENV_AUTH = ('librarian', 'password123')
WORKFLOW1 = """
---
version: "2.0"
wf1:
type: direct
tasks:
task1:
action: std.http url="https://api.library.org/books"
publish:
result: <% $ %>
"""
WORKFLOW2 = """
---
version: "2.0"
wf2:
type: direct
tasks:
task1:
action: std.http url="https://api.library.org/books" timeout=60
publish:
result: <% $ %>
"""
WORKFLOW1_WITH_ITEMS = """
---
version: "2.0"
wf1_with_items:
type: direct
input:
- links
tasks:
task1:
with-items: link in <% $.links %>
action: std.http url=<% $.link %>
publish:
result: <% $ %>
"""
WORKFLOW2_WITH_ITEMS = """
---
version: "2.0"
wf2_with_items:
type: direct
input:
- links
tasks:
task1:
with-items: link in <% $.links %>
action: std.http url=<% $.link %> timeout=60
publish:
result: <% $ %>
"""
class ActionDefaultTest(base.EngineTestCase):
@mock.patch.object(
requests, 'request',
mock.MagicMock(return_value=test_base.FakeHTTPResponse('', 200, 'OK')))
@mock.patch.object(
std_actions.HTTPAction, 'is_sync',
mock.MagicMock(return_value=True))
def test_action_defaults_from_env(self):
wf_service.create_workflows(WORKFLOW1)
wf_ex = self.engine.start_workflow('wf1', env=ENV)
self.await_workflow_success(wf_ex.id)
with db_api.transaction():
wf_ex = db_api.get_workflow_execution(wf_ex.id)
self.assertEqual(states.SUCCESS, wf_ex.state)
self._assert_single_item(wf_ex.task_executions, name='task1')
requests.request.assert_called_with(
'GET', 'https://api.library.org/books',
params=None, data=None, headers=None, cookies=None,
allow_redirects=None, proxies=None, verify=None,
auth=EXPECTED_ENV_AUTH,
timeout=ENV['__actions']['std.http']['timeout'])
@mock.patch.object(
requests, 'request',
mock.MagicMock(return_value=test_base.FakeHTTPResponse('', 200, 'OK')))
@mock.patch.object(
std_actions.HTTPAction, 'is_sync',
mock.MagicMock(return_value=True))
def test_action_defaults_from_env_not_applied(self):
wf_service.create_workflows(WORKFLOW2)
wf_ex = self.engine.start_workflow('wf2', env=ENV)
self.await_workflow_success(wf_ex.id)
with db_api.transaction():
wf_ex = db_api.get_workflow_execution(wf_ex.id)
self.assertEqual(states.SUCCESS, wf_ex.state)
self._assert_single_item(wf_ex.task_executions, name='task1')
requests.request.assert_called_with(
'GET', 'https://api.library.org/books',
params=None, data=None, headers=None, cookies=None,
allow_redirects=None, proxies=None, verify=None,
auth=EXPECTED_ENV_AUTH,
timeout=60
)
@mock.patch.object(
requests, 'request',
mock.MagicMock(return_value=test_base.FakeHTTPResponse('', 200, 'OK')))
@mock.patch.object(
std_actions.HTTPAction, 'is_sync',
mock.MagicMock(return_value=True))
def test_with_items_action_defaults_from_env(self):
wf_service.create_workflows(WORKFLOW1_WITH_ITEMS)
wf_input = {
'links': [
'https://api.library.org/books',
'https://api.library.org/authors'
]
}
wf_ex = self.engine.start_workflow(
'wf1_with_items',
wf_input=wf_input,
env=ENV
)
self.await_workflow_success(wf_ex.id)
with db_api.transaction():
wf_ex = db_api.get_workflow_execution(wf_ex.id)
self.assertEqual(states.SUCCESS, wf_ex.state)
self._assert_single_item(wf_ex.task_executions, name='task1')
calls = [mock.call('GET', url, params=None, data=None,
headers=None, cookies=None,
allow_redirects=None, proxies=None,
auth=EXPECTED_ENV_AUTH, verify=None,
timeout=ENV['__actions']['std.http']['timeout'])
for url in wf_input['links']]
requests.request.assert_has_calls(calls, any_order=True)
@mock.patch.object(
requests, 'request',
mock.MagicMock(return_value=test_base.FakeHTTPResponse('', 200, 'OK')))
@mock.patch.object(
std_actions.HTTPAction, 'is_sync',
mock.MagicMock(return_value=True))
def test_with_items_action_defaults_from_env_not_applied(self):
wf_service.create_workflows(WORKFLOW2_WITH_ITEMS)
wf_input = {
'links': [
'https://api.library.org/books',
'https://api.library.org/authors'
]
}
wf_ex = self.engine.start_workflow(
'wf2_with_items',
wf_input=wf_input,
env=ENV
)
self.await_workflow_success(wf_ex.id)
with db_api.transaction():
wf_ex = db_api.get_workflow_execution(wf_ex.id)
self.assertEqual(states.SUCCESS, wf_ex.state)
self._assert_single_item(wf_ex.task_executions, name='task1')
calls = [mock.call('GET', url, params=None, data=None,
headers=None, cookies=None,
allow_redirects=None, proxies=None,
auth=EXPECTED_ENV_AUTH, verify=None,
timeout=60)
for url in wf_input['links']]
requests.request.assert_has_calls(calls, any_order=True)
| StackStorm/mistral | mistral/tests/unit/engine/test_action_defaults.py | Python | apache-2.0 | 6,915 |
/*
* Copyright 2012 ZXing authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#import "ZXReader.h"
@class ZXBinaryBitmap, ZXDecodeHints, ZXResult;
/**
* This implementation can detect and decode a MaxiCode in an image.
*/
@interface ZXMaxiCodeReader : NSObject <ZXReader>
@end
| hgl888/TeamTalk | ios/ZXing/maxicode/ZXMaxiCodeReader.h | C | apache-2.0 | 829 |
<?php
namespace Bigbank\Gcm;
/**
* Gcm response parser
*/
class Response
{
/**
* Unique ID (number) identifying the multicast message.
*
* @var integer
*/
private $multicastId = null;
/**
* Unique id identifying the single message.
*
     * Only has a value if a single or topic message is sent to Google.
*
* @var int
*/
private $messageId = null;
/**
* Number of messages that were processed without an error.
*
* @var integer
*/
private $success = null;
/**
* Number of messages that could not be processed.
*
* @var integer
*/
private $failure = null;
/**
* Number of results that contain a canonical registration ID.
*
* @var integer
*/
private $canonicalIds = null;
/**
* Holds single message error
*
* @var string
*/
private $error = null;
/**
* Array of objects representing the status of the messages processed.
* The objects are listed in the same order as the request
* (i.e., for each registration ID in the request, its result is listed in the same index in the response)
* and they can have these fields:
* message_id: String representing the message when it was successfully processed.
* registration_id: If set, means that GCM processed the message but it has another canonical
* registration ID for that device, so sender should replace the IDs on future requests
* (otherwise they might be rejected). This field is never set if there is an error in the request.
* error: String describing an error that occurred while processing the message for that recipient.
* The possible values are the same as documented in the above table, plus "Unavailable"
* (meaning GCM servers were busy and could not process the message for that particular recipient,
* so it could be retried).
*
* @var array
*/
private $results = [];
/**
* @param Message $message
* @param string $responseBody json string of google cloud message server response
*
* @throws Exception
*/
public function __construct(Message $message, $responseBody)
{
$data = \json_decode($responseBody, true);
if ($data === null) {
throw new Exception("Malformed response body. " . $responseBody, Exception::MALFORMED_RESPONSE);
}
        if (empty($data['error'])) { // 'error' is absent on success responses
$this->messageId = (isset($data['message_id'])) ? $data['message_id'] : null;
$this->multicastId = $data['multicast_id'];
$this->failure = $data['failure'];
$this->success = (!$this->multicastId) ? 1 : $data['success'];
$this->canonicalIds = $data['canonical_ids'];
$this->results = [];
$this->parseResults($message, $data);
} else {
$this->error = $data['error'];
$this->messageId = (isset($data['message_id'])) ? $data['message_id'] : null;
$this->failure = (!isset($data['failure'])) ? 1 : $data['failure'];
}
}
/**
* @return int
*/
public function getMulticastId()
{
return $this->multicastId;
}
/**
* @return int|null
*/
public function getMessageId()
{
return $this->messageId;
}
/**
* @return int
*/
public function getSuccessCount()
{
return $this->success;
}
/**
* @return int
*/
public function getFailureCount()
{
return $this->failure;
}
/**
* @return int
*/
public function getNewRegistrationIdsCount()
{
return $this->canonicalIds;
}
/**
* @return array
*/
public function getResults()
{
return $this->results;
}
/**
* @return string
*/
public function getError()
{
return $this->error;
}
/**
* Return an array of expired registration ids linked to new id
* All old registration ids must be updated to new ones in DB
*
* @return array oldRegistrationId => newRegistrationId
*/
public function getNewRegistrationIds()
{
if ($this->getNewRegistrationIdsCount() == 0) {
return [];
}
$filteredResults = array_filter($this->results,
function ($result) {
return isset($result['registration_id']);
});
$data = array_map(function ($result) {
return $result['registration_id'];
}, $filteredResults);
return $data;
}
/**
* Returns an array containing invalid registration ids
* They must be removed from DB because the application was uninstalled from the device.
*
* @return array
*/
public function getInvalidRegistrationIds()
{
if ($this->getFailureCount() == 0) {
return [];
}
$filteredResults = array_filter($this->results,
function ($result) {
return (
isset($result['error'])
&&
(
($result['error'] == "NotRegistered")
||
($result['error'] == "InvalidRegistration")
)
);
});
return array_keys($filteredResults);
}
/**
* Returns an array of registration ids for which you must resend a message,
* cause devices are not available now.
*
* @return array
*/
public function getUnavailableRegistrationIds()
{
if ($this->getFailureCount() == 0) {
return [];
}
$filteredResults = array_filter($this->results,
function ($result) {
return (
isset($result['error'])
&&
($result['error'] == "Unavailable")
);
});
return array_keys($filteredResults);
}
/**
* Parse result array with correct data
*
* @param Message $message
* @param array $response
*/
private function parseResults(Message $message, array $response)
{
if (is_array($message->getRecipients())) {
foreach ($message->getRecipients() as $key => $registrationId) {
$this->results[$registrationId] = $response['results'][$key];
}
} else {
$this->results[$message->getRecipients()] = $response['results'];
}
}
}
| bigbank-as/GCM | src/Bigbank/Gcm/Response.php | PHP | apache-2.0 | 6,729 |
__Description__: If there is an `applyTo: <value>` key/value pair in the `option` object within a `common` and `static` `state` object, then said `applyTo` value should be applied to the scoped level at which it is declared, in both `<selector>:<state>` and `<selector>:not(:<state>)`
__Notes__
- `applyTo` differs from `appendTo` in that there is a space between the scoped level value and the `applyTo` value
+ `applyTo` -> `<.level> <applyTo>`
+ `appendTo` -> `<.level><appendTo>` | ctr-lang/ctr | __tests__/cases-core/state/option/target/applyTo/on-non-common-static.md | Markdown | apache-2.0 | 494 |
package org.jboss.examples.ticketmonster.model;
import static javax.persistence.CascadeType.ALL;
import static javax.persistence.FetchType.EAGER;
import static javax.persistence.GenerationType.IDENTITY;
import java.io.Serializable;
import java.util.HashSet;
import java.util.Set;
import javax.persistence.Column;
import javax.persistence.Entity;
import javax.persistence.GeneratedValue;
import javax.persistence.Id;
import javax.persistence.ManyToOne;
import javax.persistence.OneToMany;
import org.hibernate.validator.constraints.NotEmpty;
/**
* <p>
* Represents a single venue
* </p>
*
* @author Shane Bryzak
* @author Pete Muir
*/
/*
* We suppress the warning about not specifying a serialVersionUID, as we are still developing this app, and want the JVM to
* generate the serialVersionUID for us. When we put this app into production, we'll generate and embed the serialVersionUID
*/
@SuppressWarnings("serial")
@Entity
public class Venue implements Serializable {
/* Declaration of fields */
/**
* The synthetic id of the object.
*/
@Id
@GeneratedValue(strategy = IDENTITY)
private Long id;
/**
* <p>
     * The name of the venue.
* </p>
*
* <p>
     * The name of the venue forms its natural identity and cannot be shared between venues.
* </p>
*
* <p>
     * The name must not be null and must be one or more characters; the Bean Validation constraint <code>@NotEmpty</code>
* enforces this.
* </p>
*/
@Column(unique = true)
@NotEmpty
private String name;
/**
* The address of the venue
*/
private Address address = new Address();
/**
* A description of the venue
*/
private String description;
/**
* <p>
* A set of sections in the venue
* </p>
*
* <p>
     * The <code>@OneToMany</code> JPA mapping establishes this relationship. TODO Explain EAGER fetch.
     * This relationship is bi-directional (a section knows which venue it is part of), and the <code>mappedBy</code>
     * attribute establishes this. We cascade all persistence operations to the set of sections, so, for example, if a venue
     * is removed, then all of its sections will also be removed.
* </p>
*/
@OneToMany(cascade = ALL, fetch = EAGER, mappedBy = "venue")
private Set<Section> sections = new HashSet<Section>();
/**
* The capacity of the venue
*/
private int capacity;
/**
* An optional media item to entice punters to the venue. The <code>@ManyToOne</code> establishes the relationship.
*/
@ManyToOne
private MediaItem mediaItem;
/* Boilerplate getters and setters */
public Long getId() {
return id;
}
public void setId(Long id) {
this.id = id;
}
public String getName() {
return name;
}
public void setName(String name) {
this.name = name;
}
public Address getAddress() {
return address;
}
public void setAddress(Address address) {
this.address = address;
}
public MediaItem getMediaItem() {
return mediaItem;
}
public void setMediaItem(MediaItem description) {
this.mediaItem = description;
}
public String getDescription() {
return description;
}
public void setDescription(String description) {
this.description = description;
}
public Set<Section> getSections() {
return sections;
}
public void setSections(Set<Section> sections) {
this.sections = sections;
}
public int getCapacity() {
return capacity;
}
public void setCapacity(int capacity) {
this.capacity = capacity;
}
/* toString(), equals() and hashCode() for Venue, using the natural identity of the object */
@Override
public boolean equals(Object o) {
if (this == o)
return true;
if (o == null || getClass() != o.getClass())
return false;
Venue venue = (Venue) o;
if (address != null ? !address.equals(venue.address) : venue.address != null)
return false;
if (name != null ? !name.equals(venue.name) : venue.name != null)
return false;
return true;
}
@Override
public int hashCode() {
int result = name != null ? name.hashCode() : 0;
result = 31 * result + (address != null ? address.hashCode() : 0);
return result;
}
@Override
public String toString() {
return name;
}
}
| jim-minter/ose3-demos | git/monster/src/main/java/org/jboss/examples/ticketmonster/model/Venue.java | Java | apache-2.0 | 4,603 |
# 0.2.1
Handle unspecified data bag items gracefully instead of raising an uncaught exception.
# 0.2.0
Added support for creating mappings per layer in addition to per hostname.
# 0.1.0
Initial release of aws-elb-registration
| antiuniverse/aws-elb-registration-cookbook | CHANGELOG.md | Markdown | apache-2.0 | 233 |
# Dermatea pallidula Cooke SPECIES
#### Status
ACCEPTED
#### According to
Index Fungorum
#### Published in
null
#### Original name
Dermatea pallidula Cooke
### Remarks
null | mdoering/backbone | life/Fungi/Ascomycota/Leotiomycetes/Helotiales/Dermateaceae/Dermea/Dermatea pallidula/README.md | Markdown | apache-2.0 | 177 |
/*
* Copyright (C) 2011 The Guava Authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.common.hash;
import com.google.common.base.Charsets;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import junit.framework.TestCase;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.Arrays;
/**
* Tests for the MessageDigestHashFunction.
*
* @author Kurt Alfred Kluever
*/
public class MessageDigestHashFunctionTest extends TestCase {
private static final ImmutableSet<String> INPUTS = ImmutableSet.of("", "Z", "foobar");
// From "How Provider Implementations Are Requested and Supplied" from
// http://docs.oracle.com/javase/6/docs/technotes/guides/security/crypto/CryptoSpec.html
// - Some providers may choose to also include alias names.
// - For example, the "SHA-1" algorithm might be referred to as "SHA1".
// - The algorithm name is not case-sensitive.
private static final ImmutableMap<String, HashFunction> ALGORITHMS =
new ImmutableMap.Builder<String, HashFunction>()
.put("MD5", Hashing.md5())
.put("SHA", Hashing.sha1()) // Not the official name, but still works
.put("SHA1", Hashing.sha1()) // Not the official name, but still works
.put("sHa-1", Hashing.sha1()) // Not the official name, but still works
.put("SHA-1", Hashing.sha1())
.put("SHA-256", Hashing.sha256())
.put("SHA-384", Hashing.sha384())
.put("SHA-512", Hashing.sha512())
.build();
public void testHashing() {
for (String stringToTest : INPUTS) {
for (String algorithmToTest : ALGORITHMS.keySet()) {
assertMessageDigestHashing(HashTestUtils.ascii(stringToTest), algorithmToTest);
}
}
}
public void testPutAfterHash() {
Hasher sha1 = Hashing.sha1().newHasher();
assertEquals("2fd4e1c67a2d28fced849ee1bb76e7391b93eb12",
sha1.putString("The quick brown fox jumps over the lazy dog", Charsets.UTF_8)
.hash()
.toString());
try {
sha1.putInt(42);
fail();
} catch (IllegalStateException expected) {
}
}
public void testHashTwice() {
Hasher sha1 = Hashing.sha1().newHasher();
assertEquals("2fd4e1c67a2d28fced849ee1bb76e7391b93eb12",
sha1.putString("The quick brown fox jumps over the lazy dog", Charsets.UTF_8)
.hash()
.toString());
try {
HashCode unused = sha1.hash();
fail();
} catch (IllegalStateException expected) {
}
}
public void testToString() {
assertEquals("Hashing.md5()", Hashing.md5().toString());
assertEquals("Hashing.sha1()", Hashing.sha1().toString());
assertEquals("Hashing.sha256()", Hashing.sha256().toString());
assertEquals("Hashing.sha512()", Hashing.sha512().toString());
}
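  // Verifies that Guava's implementation matches the JDK MessageDigest for the
  // full digest and for every truncated output length down to 4 bytes.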
private static void assertMessageDigestHashing(byte[] input, String algorithmName) {
try {
MessageDigest digest = MessageDigest.getInstance(algorithmName);
assertEquals(
HashCode.fromBytes(digest.digest(input)),
ALGORITHMS.get(algorithmName).hashBytes(input));
for (int bytes = 4; bytes <= digest.getDigestLength(); bytes++) {
assertEquals(
HashCode.fromBytes(Arrays.copyOf(digest.digest(input), bytes)),
new MessageDigestHashFunction(algorithmName, bytes, algorithmName).hashBytes(input));
}
try {
int maxSize = digest.getDigestLength();
new MessageDigestHashFunction(algorithmName, maxSize + 1, algorithmName);
fail();
} catch (IllegalArgumentException expected) {
}
} catch (NoSuchAlgorithmException nsae) {
throw new AssertionError(nsae);
}
}
}
| aiyanbo/guava | guava-tests/test/com/google/common/hash/MessageDigestHashFunctionTest.java | Java | apache-2.0 | 4,293 |
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<title>OR-Tools</title>
<meta http-equiv="Content-Type" content="text/html;"/>
<meta charset="utf-8"/>
<!--<link rel='stylesheet' type='text/css' href="https://fonts.googleapis.com/css?family=Ubuntu:400,700,400italic"/>-->
<script type="text/javascript" src="jquery.js"></script>
<script type="text/javascript" src="dynsections.js"></script>
<link href="doxygen.css" rel="stylesheet" type="text/css" />
<link href="styleSheet.tmp.css" rel="stylesheet" type="text/css"/>
</head>
<body>
<div id="banner-container">
<div id="banner">
<span id="sfml">Google OR-Tools 9.2</span>
</div>
</div>
<div id="content" style="width: 100%; overflow: hidden;">
<div style="margin-left: 15px; margin-top: 5px; float: left; color: #145A32;">
<h2>Java Reference</h2>
<ul>
<li><a href="../java/namespacecom_1_1google_1_1ortools_1_1sat.html">CP-SAT</a></li>
<li><a href="../java/namespacecom_1_1google_1_1ortools_1_1graph.html">Graph</a></li>
<li><a href="../java/namespacecom_1_1google_1_1ortools_1_1algorithms.html">Knapsack solver</a></li>
<li><a href="../java/namespacecom_1_1google_1_1ortools_1_1linearsolver.html">Linear solver</a></li>
<li><a href="../java/namespacecom_1_1google_1_1ortools_1_1constraintsolver.html">Routing</a></li>
<li><a href="../java/namespacecom_1_1google_1_1ortools_1_1util.html">Util</a></li>
</ul>
</div>
<div id="content">
<div align="center">
<h1 style="color: #145A32;">Java Reference</h1>
</div>
<!-- Generated by Doxygen 1.9.2 -->
<script type="text/javascript">
/* @license magnet:?xt=urn:btih:d3d9a9a6595521f9666a5e94cc830dab83b65699&dn=expat.txt MIT */
var searchBox = new SearchBox("searchBox", "search",'Search','.html');
/* @license-end */
</script>
<script type="text/javascript" src="menudata.js"></script>
<script type="text/javascript" src="menu.js"></script>
<script type="text/javascript">
/* @license magnet:?xt=urn:btih:d3d9a9a6595521f9666a5e94cc830dab83b65699&dn=expat.txt MIT */
$(function() {
initMenu('',true,false,'search.php','Search');
$(document).ready(function() { init_search(); });
});
/* @license-end */
</script>
<div id="main-nav"></div>
</div><!-- top -->
<div id="side-nav" class="ui-resizable side-nav-resizable">
<div id="nav-tree">
<div id="nav-tree-contents">
<div id="nav-sync" class="sync"></div>
</div>
</div>
<div id="splitbar" style="-moz-user-select:none;"
class="ui-resizable-handle">
</div>
</div>
<script type="text/javascript">
/* @license magnet:?xt=urn:btih:d3d9a9a6595521f9666a5e94cc830dab83b65699&dn=expat.txt MIT */
$(document).ready(function(){initNavTree('interfacecom_1_1google_1_1ortools_1_1sat_1_1IntervalConstraintProtoOrBuilder.html',''); initResizable(); });
/* @license-end */
</script>
<div id="doc-content">
<!-- window showing the filter options -->
<div id="MSearchSelectWindow"
onmouseover="return searchBox.OnSearchSelectShow()"
onmouseout="return searchBox.OnSearchSelectHide()"
onkeydown="return searchBox.OnSearchSelectKey(event)">
</div>
<!-- iframe showing the search results (closed by default) -->
<div id="MSearchResultsWindow">
<iframe src="javascript:void(0)" frameborder="0"
name="MSearchResults" id="MSearchResults">
</iframe>
</div>
<div class="header">
<div class="summary">
<a href="#pub-methods">Public Member Functions</a> |
<a href="interfacecom_1_1google_1_1ortools_1_1sat_1_1IntervalConstraintProtoOrBuilder-members.html">List of all members</a> </div>
<div class="headertitle"><div class="title">IntervalConstraintProtoOrBuilder</div></div>
</div><!--header-->
<div class="contents">
<a name="details" id="details"></a><h2 class="groupheader">Detailed Description</h2>
<div class="textblock">
<p class="definition">Definition at line <a class="el" href="IntervalConstraintProtoOrBuilder_8java_source.html#l00006">6</a> of file <a class="el" href="IntervalConstraintProtoOrBuilder_8java_source.html">IntervalConstraintProtoOrBuilder.java</a>.</p>
</div><table class="memberdecls">
<tr class="heading"><td colspan="2"><h2 class="groupheader"><a id="pub-methods" name="pub-methods"></a>
Public Member Functions</h2></td></tr>
<tr class="memitem:af9618a9e1f1a516f3afe9accf2f68e9e"><td class="memItemLeft" align="right" valign="top">boolean </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacecom_1_1google_1_1ortools_1_1sat_1_1IntervalConstraintProtoOrBuilder.html#af9618a9e1f1a516f3afe9accf2f68e9e">hasStart</a> ()</td></tr>
<tr class="separator:af9618a9e1f1a516f3afe9accf2f68e9e"><td class="memSeparator" colspan="2"> </td></tr>
<tr class="memitem:a8471b7bf1bceb8a6b370d0b4f61cc6da"><td class="memItemLeft" align="right" valign="top"><a class="el" href="classcom_1_1google_1_1ortools_1_1sat_1_1LinearExpressionProto.html">com.google.ortools.sat.LinearExpressionProto</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacecom_1_1google_1_1ortools_1_1sat_1_1IntervalConstraintProtoOrBuilder.html#a8471b7bf1bceb8a6b370d0b4f61cc6da">getStart</a> ()</td></tr>
<tr class="separator:a8471b7bf1bceb8a6b370d0b4f61cc6da"><td class="memSeparator" colspan="2"> </td></tr>
<tr class="memitem:a5e52f9711ecacca9fc2b3b02f0a524bf"><td class="memItemLeft" align="right" valign="top"><a class="el" href="interfacecom_1_1google_1_1ortools_1_1sat_1_1LinearExpressionProtoOrBuilder.html">com.google.ortools.sat.LinearExpressionProtoOrBuilder</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacecom_1_1google_1_1ortools_1_1sat_1_1IntervalConstraintProtoOrBuilder.html#a5e52f9711ecacca9fc2b3b02f0a524bf">getStartOrBuilder</a> ()</td></tr>
<tr class="separator:a5e52f9711ecacca9fc2b3b02f0a524bf"><td class="memSeparator" colspan="2"> </td></tr>
<tr class="memitem:a9b0197a2b2718c7b0061d19d4b1fbcb4"><td class="memItemLeft" align="right" valign="top">boolean </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacecom_1_1google_1_1ortools_1_1sat_1_1IntervalConstraintProtoOrBuilder.html#a9b0197a2b2718c7b0061d19d4b1fbcb4">hasEnd</a> ()</td></tr>
<tr class="memdesc:a9b0197a2b2718c7b0061d19d4b1fbcb4"><td class="mdescLeft"> </td><td class="mdescRight"><code>.operations_research.sat.LinearExpressionProto end = 5;</code> <a href="interfacecom_1_1google_1_1ortools_1_1sat_1_1IntervalConstraintProtoOrBuilder.html#a9b0197a2b2718c7b0061d19d4b1fbcb4">More...</a><br /></td></tr>
<tr class="separator:a9b0197a2b2718c7b0061d19d4b1fbcb4"><td class="memSeparator" colspan="2"> </td></tr>
<tr class="memitem:a13b3a6bdbc3183c45d0197e8d7171849"><td class="memItemLeft" align="right" valign="top"><a class="el" href="classcom_1_1google_1_1ortools_1_1sat_1_1LinearExpressionProto.html">com.google.ortools.sat.LinearExpressionProto</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacecom_1_1google_1_1ortools_1_1sat_1_1IntervalConstraintProtoOrBuilder.html#a13b3a6bdbc3183c45d0197e8d7171849">getEnd</a> ()</td></tr>
<tr class="memdesc:a13b3a6bdbc3183c45d0197e8d7171849"><td class="mdescLeft"> </td><td class="mdescRight"><code>.operations_research.sat.LinearExpressionProto end = 5;</code> <a href="interfacecom_1_1google_1_1ortools_1_1sat_1_1IntervalConstraintProtoOrBuilder.html#a13b3a6bdbc3183c45d0197e8d7171849">More...</a><br /></td></tr>
<tr class="separator:a13b3a6bdbc3183c45d0197e8d7171849"><td class="memSeparator" colspan="2"> </td></tr>
<tr class="memitem:aac9907139f4212fc3afeb8db5d2c6645"><td class="memItemLeft" align="right" valign="top"><a class="el" href="interfacecom_1_1google_1_1ortools_1_1sat_1_1LinearExpressionProtoOrBuilder.html">com.google.ortools.sat.LinearExpressionProtoOrBuilder</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacecom_1_1google_1_1ortools_1_1sat_1_1IntervalConstraintProtoOrBuilder.html#aac9907139f4212fc3afeb8db5d2c6645">getEndOrBuilder</a> ()</td></tr>
<tr class="memdesc:aac9907139f4212fc3afeb8db5d2c6645"><td class="mdescLeft"> </td><td class="mdescRight"><code>.operations_research.sat.LinearExpressionProto end = 5;</code> <a href="interfacecom_1_1google_1_1ortools_1_1sat_1_1IntervalConstraintProtoOrBuilder.html#aac9907139f4212fc3afeb8db5d2c6645">More...</a><br /></td></tr>
<tr class="separator:aac9907139f4212fc3afeb8db5d2c6645"><td class="memSeparator" colspan="2"> </td></tr>
<tr class="memitem:a3ad38ce6c081e909851785725d3c4f8a"><td class="memItemLeft" align="right" valign="top">boolean </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacecom_1_1google_1_1ortools_1_1sat_1_1IntervalConstraintProtoOrBuilder.html#a3ad38ce6c081e909851785725d3c4f8a">hasSize</a> ()</td></tr>
<tr class="memdesc:a3ad38ce6c081e909851785725d3c4f8a"><td class="mdescLeft"> </td><td class="mdescRight"><code>.operations_research.sat.LinearExpressionProto size = 6;</code> <a href="interfacecom_1_1google_1_1ortools_1_1sat_1_1IntervalConstraintProtoOrBuilder.html#a3ad38ce6c081e909851785725d3c4f8a">More...</a><br /></td></tr>
<tr class="separator:a3ad38ce6c081e909851785725d3c4f8a"><td class="memSeparator" colspan="2"> </td></tr>
<tr class="memitem:aa15366d92d2522f2c4bbb87ccbda5047"><td class="memItemLeft" align="right" valign="top"><a class="el" href="classcom_1_1google_1_1ortools_1_1sat_1_1LinearExpressionProto.html">com.google.ortools.sat.LinearExpressionProto</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacecom_1_1google_1_1ortools_1_1sat_1_1IntervalConstraintProtoOrBuilder.html#aa15366d92d2522f2c4bbb87ccbda5047">getSize</a> ()</td></tr>
<tr class="memdesc:aa15366d92d2522f2c4bbb87ccbda5047"><td class="mdescLeft"> </td><td class="mdescRight"><code>.operations_research.sat.LinearExpressionProto size = 6;</code> <a href="interfacecom_1_1google_1_1ortools_1_1sat_1_1IntervalConstraintProtoOrBuilder.html#aa15366d92d2522f2c4bbb87ccbda5047">More...</a><br /></td></tr>
<tr class="separator:aa15366d92d2522f2c4bbb87ccbda5047"><td class="memSeparator" colspan="2"> </td></tr>
<tr class="memitem:a237a9bec82dc82d4048ff2ab810601e2"><td class="memItemLeft" align="right" valign="top"><a class="el" href="interfacecom_1_1google_1_1ortools_1_1sat_1_1LinearExpressionProtoOrBuilder.html">com.google.ortools.sat.LinearExpressionProtoOrBuilder</a> </td><td class="memItemRight" valign="bottom"><a class="el" href="interfacecom_1_1google_1_1ortools_1_1sat_1_1IntervalConstraintProtoOrBuilder.html#a237a9bec82dc82d4048ff2ab810601e2">getSizeOrBuilder</a> ()</td></tr>
<tr class="memdesc:a237a9bec82dc82d4048ff2ab810601e2"><td class="mdescLeft"> </td><td class="mdescRight"><code>.operations_research.sat.LinearExpressionProto size = 6;</code> <a href="interfacecom_1_1google_1_1ortools_1_1sat_1_1IntervalConstraintProtoOrBuilder.html#a237a9bec82dc82d4048ff2ab810601e2">More...</a><br /></td></tr>
<tr class="separator:a237a9bec82dc82d4048ff2ab810601e2"><td class="memSeparator" colspan="2"> </td></tr>
</table>
<h2 class="groupheader">Member Function Documentation</h2>
<a id="a13b3a6bdbc3183c45d0197e8d7171849" name="a13b3a6bdbc3183c45d0197e8d7171849"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a13b3a6bdbc3183c45d0197e8d7171849">◆ </a></span>getEnd()</h2>
<div class="memitem">
<div class="memproto">
<table class="memname">
<tr>
<td class="memname"><a class="el" href="classcom_1_1google_1_1ortools_1_1sat_1_1LinearExpressionProto.html">com.google.ortools.sat.LinearExpressionProto</a> getEnd </td>
<td>(</td>
<td class="paramname"></td><td>)</td>
<td></td>
</tr>
</table>
</div><div class="memdoc">
<p><code>.operations_research.sat.LinearExpressionProto end = 5;</code> </p>
<dl class="section return"><dt>Returns</dt><dd>The end. </dd></dl>
<p>Implemented in <a class="el" href="classcom_1_1google_1_1ortools_1_1sat_1_1IntervalConstraintProto.html#ac9171ca504d921151aeb477411c3b87d">IntervalConstraintProto</a>, and <a class="el" href="classcom_1_1google_1_1ortools_1_1sat_1_1IntervalConstraintProto_1_1Builder.html#a13b3a6bdbc3183c45d0197e8d7171849">IntervalConstraintProto.Builder</a>.</p>
</div>
</div>
<a id="aac9907139f4212fc3afeb8db5d2c6645" name="aac9907139f4212fc3afeb8db5d2c6645"></a>
<h2 class="memtitle"><span class="permalink"><a href="#aac9907139f4212fc3afeb8db5d2c6645">◆ </a></span>getEndOrBuilder()</h2>
<div class="memitem">
<div class="memproto">
<table class="memname">
<tr>
<td class="memname"><a class="el" href="interfacecom_1_1google_1_1ortools_1_1sat_1_1LinearExpressionProtoOrBuilder.html">com.google.ortools.sat.LinearExpressionProtoOrBuilder</a> getEndOrBuilder </td>
<td>(</td>
<td class="paramname"></td><td>)</td>
<td></td>
</tr>
</table>
</div><div class="memdoc">
<p><code>.operations_research.sat.LinearExpressionProto end = 5;</code> </p>
<p>Implemented in <a class="el" href="classcom_1_1google_1_1ortools_1_1sat_1_1IntervalConstraintProto.html#ab7d75ba562819ebaf4f3174a34bae7c1">IntervalConstraintProto</a>, and <a class="el" href="classcom_1_1google_1_1ortools_1_1sat_1_1IntervalConstraintProto_1_1Builder.html#aac9907139f4212fc3afeb8db5d2c6645">IntervalConstraintProto.Builder</a>.</p>
</div>
</div>
<a id="aa15366d92d2522f2c4bbb87ccbda5047" name="aa15366d92d2522f2c4bbb87ccbda5047"></a>
<h2 class="memtitle"><span class="permalink"><a href="#aa15366d92d2522f2c4bbb87ccbda5047">◆ </a></span>getSize()</h2>
<div class="memitem">
<div class="memproto">
<table class="memname">
<tr>
<td class="memname"><a class="el" href="classcom_1_1google_1_1ortools_1_1sat_1_1LinearExpressionProto.html">com.google.ortools.sat.LinearExpressionProto</a> getSize </td>
<td>(</td>
<td class="paramname"></td><td>)</td>
<td></td>
</tr>
</table>
</div><div class="memdoc">
<p><code>.operations_research.sat.LinearExpressionProto size = 6;</code> </p>
<dl class="section return"><dt>Returns</dt><dd>The size. </dd></dl>
<p>Implemented in <a class="el" href="classcom_1_1google_1_1ortools_1_1sat_1_1IntervalConstraintProto.html#aaf089b475af5c0506025e946bb3cb054">IntervalConstraintProto</a>, and <a class="el" href="classcom_1_1google_1_1ortools_1_1sat_1_1IntervalConstraintProto_1_1Builder.html#aa15366d92d2522f2c4bbb87ccbda5047">IntervalConstraintProto.Builder</a>.</p>
</div>
</div>
<a id="a237a9bec82dc82d4048ff2ab810601e2" name="a237a9bec82dc82d4048ff2ab810601e2"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a237a9bec82dc82d4048ff2ab810601e2">◆ </a></span>getSizeOrBuilder()</h2>
<div class="memitem">
<div class="memproto">
<table class="memname">
<tr>
<td class="memname"><a class="el" href="interfacecom_1_1google_1_1ortools_1_1sat_1_1LinearExpressionProtoOrBuilder.html">com.google.ortools.sat.LinearExpressionProtoOrBuilder</a> getSizeOrBuilder </td>
<td>(</td>
<td class="paramname"></td><td>)</td>
<td></td>
</tr>
</table>
</div><div class="memdoc">
<p><code>.operations_research.sat.LinearExpressionProto size = 6;</code> </p>
<p>Implemented in <a class="el" href="classcom_1_1google_1_1ortools_1_1sat_1_1IntervalConstraintProto.html#aa3cd3b64451c6eb1510d64b4802d78e3">IntervalConstraintProto</a>, and <a class="el" href="classcom_1_1google_1_1ortools_1_1sat_1_1IntervalConstraintProto_1_1Builder.html#a237a9bec82dc82d4048ff2ab810601e2">IntervalConstraintProto.Builder</a>.</p>
</div>
</div>
<a id="a8471b7bf1bceb8a6b370d0b4f61cc6da" name="a8471b7bf1bceb8a6b370d0b4f61cc6da"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a8471b7bf1bceb8a6b370d0b4f61cc6da">◆ </a></span>getStart()</h2>
<div class="memitem">
<div class="memproto">
<table class="memname">
<tr>
<td class="memname"><a class="el" href="classcom_1_1google_1_1ortools_1_1sat_1_1LinearExpressionProto.html">com.google.ortools.sat.LinearExpressionProto</a> getStart </td>
<td>(</td>
<td class="paramname"></td><td>)</td>
<td></td>
</tr>
</table>
</div><div class="memdoc">
<pre>
 IMPORTANT: For now, this constraint does not enforce any relations on the
 view, and a linear constraint must be added together with this to enforce
 enforcement => start + size == end. An enforcement => size >= 0 might also
 be needed.
 IMPORTANT: For now, we only support affine relations. We could easily
 create an intermediate variable to support full linear expressions, but this
 isn't done currently.
</pre><p ><code>.operations_research.sat.LinearExpressionProto start = 4;</code> </p><dl class="section return"><dt>Returns</dt><dd>The start. </dd></dl>
<p>Implemented in <a class="el" href="classcom_1_1google_1_1ortools_1_1sat_1_1IntervalConstraintProto.html#a2c4b3e0b0fbe2599af27edb00d47b759">IntervalConstraintProto</a>, and <a class="el" href="classcom_1_1google_1_1ortools_1_1sat_1_1IntervalConstraintProto_1_1Builder.html#a8471b7bf1bceb8a6b370d0b4f61cc6da">IntervalConstraintProto.Builder</a>.</p>
</div>
</div>
<a id="a5e52f9711ecacca9fc2b3b02f0a524bf" name="a5e52f9711ecacca9fc2b3b02f0a524bf"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a5e52f9711ecacca9fc2b3b02f0a524bf">◆ </a></span>getStartOrBuilder()</h2>
<div class="memitem">
<div class="memproto">
<table class="memname">
<tr>
<td class="memname"><a class="el" href="interfacecom_1_1google_1_1ortools_1_1sat_1_1LinearExpressionProtoOrBuilder.html">com.google.ortools.sat.LinearExpressionProtoOrBuilder</a> getStartOrBuilder </td>
<td>(</td>
<td class="paramname"></td><td>)</td>
<td></td>
</tr>
</table>
</div><div class="memdoc">
<pre>
 IMPORTANT: For now, this constraint does not enforce any relations on the
 view, and a linear constraint must be added together with this to enforce
 enforcement => start + size == end. An enforcement => size >= 0 might also
 be needed.
 IMPORTANT: For now, we only support affine relations. We could easily
 create an intermediate variable to support full linear expressions, but this
 isn't done currently.
</pre><p ><code>.operations_research.sat.LinearExpressionProto start = 4;</code> </p>
<p>Implemented in <a class="el" href="classcom_1_1google_1_1ortools_1_1sat_1_1IntervalConstraintProto.html#a3e71ab24003723fe61b18d77f826c001">IntervalConstraintProto</a>, and <a class="el" href="classcom_1_1google_1_1ortools_1_1sat_1_1IntervalConstraintProto_1_1Builder.html#a5e52f9711ecacca9fc2b3b02f0a524bf">IntervalConstraintProto.Builder</a>.</p>
</div>
</div>
<a id="a9b0197a2b2718c7b0061d19d4b1fbcb4" name="a9b0197a2b2718c7b0061d19d4b1fbcb4"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a9b0197a2b2718c7b0061d19d4b1fbcb4">◆ </a></span>hasEnd()</h2>
<div class="memitem">
<div class="memproto">
<table class="memname">
<tr>
<td class="memname">boolean hasEnd </td>
<td>(</td>
<td class="paramname"></td><td>)</td>
<td></td>
</tr>
</table>
</div><div class="memdoc">
<p><code>.operations_research.sat.LinearExpressionProto end = 5;</code> </p>
<dl class="section return"><dt>Returns</dt><dd>Whether the end field is set. </dd></dl>
<p>Implemented in <a class="el" href="classcom_1_1google_1_1ortools_1_1sat_1_1IntervalConstraintProto.html#adb53e4a8cf21af1718b697ba52ee1a15">IntervalConstraintProto</a>, and <a class="el" href="classcom_1_1google_1_1ortools_1_1sat_1_1IntervalConstraintProto_1_1Builder.html#a9b0197a2b2718c7b0061d19d4b1fbcb4">IntervalConstraintProto.Builder</a>.</p>
</div>
</div>
<a id="a3ad38ce6c081e909851785725d3c4f8a" name="a3ad38ce6c081e909851785725d3c4f8a"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a3ad38ce6c081e909851785725d3c4f8a">◆ </a></span>hasSize()</h2>
<div class="memitem">
<div class="memproto">
<table class="memname">
<tr>
<td class="memname">boolean hasSize </td>
<td>(</td>
<td class="paramname"></td><td>)</td>
<td></td>
</tr>
</table>
</div><div class="memdoc">
<p><code>.operations_research.sat.LinearExpressionProto size = 6;</code> </p>
<dl class="section return"><dt>Returns</dt><dd>Whether the size field is set. </dd></dl>
<p>Implemented in <a class="el" href="classcom_1_1google_1_1ortools_1_1sat_1_1IntervalConstraintProto.html#aae9643420ff88cb4c38c8e9181dd35ac">IntervalConstraintProto</a>, and <a class="el" href="classcom_1_1google_1_1ortools_1_1sat_1_1IntervalConstraintProto_1_1Builder.html#a3ad38ce6c081e909851785725d3c4f8a">IntervalConstraintProto.Builder</a>.</p>
</div>
</div>
<a id="af9618a9e1f1a516f3afe9accf2f68e9e" name="af9618a9e1f1a516f3afe9accf2f68e9e"></a>
<h2 class="memtitle"><span class="permalink"><a href="#af9618a9e1f1a516f3afe9accf2f68e9e">◆ </a></span>hasStart()</h2>
<div class="memitem">
<div class="memproto">
<table class="memname">
<tr>
<td class="memname">boolean hasStart </td>
<td>(</td>
<td class="paramname"></td><td>)</td>
<td></td>
</tr>
</table>
</div><div class="memdoc">
<pre>
 IMPORTANT: For now, this constraint does not enforce any relations on the
 view, and a linear constraint must be added together with this to enforce
 enforcement => start + size == end. An enforcement => size >= 0 might also
 be needed.
 IMPORTANT: For now, we only support affine relations. We could easily
 create an intermediate variable to support full linear expressions, but this
 isn't done currently.
</pre><p ><code>.operations_research.sat.LinearExpressionProto start = 4;</code> </p><dl class="section return"><dt>Returns</dt><dd>Whether the start field is set. </dd></dl>
<p>Implemented in <a class="el" href="classcom_1_1google_1_1ortools_1_1sat_1_1IntervalConstraintProto.html#af42348e54b4d3cb22d8020f260aa886c">IntervalConstraintProto</a>, and <a class="el" href="classcom_1_1google_1_1ortools_1_1sat_1_1IntervalConstraintProto_1_1Builder.html#af9618a9e1f1a516f3afe9accf2f68e9e">IntervalConstraintProto.Builder</a>.</p>
</div>
</div>
<hr/>The documentation for this interface was generated from the following file:<ul>
<li><a class="el" href="IntervalConstraintProtoOrBuilder_8java_source.html">IntervalConstraintProtoOrBuilder.java</a></li>
</ul>
</div><!-- contents -->
</div><!-- doc-content -->
</div>
</div>
<div id="footer-container">
<div id="footer">
</div>
</div>
</body>
</html>
| google/or-tools | docs/java/interfacecom_1_1google_1_1ortools_1_1sat_1_1IntervalConstraintProtoOrBuilder.html | HTML | apache-2.0 | 23,010 |
import re
import unicodedata
from collections import defaultdict
from typing import Any, Dict, List, Optional, Sequence, Union
from django.conf import settings
from django.core.exceptions import ValidationError
from django.db.models.query import QuerySet
from django.forms.models import model_to_dict
from django.utils.translation import gettext as _
from typing_extensions import TypedDict
from zulip_bots.custom_exceptions import ConfigValidationError
from zerver.lib.avatar import avatar_url, get_avatar_field
from zerver.lib.cache import (
bulk_cached_fetch,
realm_user_dict_fields,
user_profile_by_id_cache_key,
user_profile_cache_key_id,
)
from zerver.lib.exceptions import OrganizationAdministratorRequired
from zerver.lib.request import JsonableError
from zerver.lib.timezone import canonicalize_timezone
from zerver.models import (
CustomProfileField,
CustomProfileFieldValue,
Realm,
Service,
UserProfile,
get_realm_user_dicts,
get_user_profile_by_id_in_realm,
)
def check_full_name(full_name_raw: str) -> str:
full_name = full_name_raw.strip()
if len(full_name) > UserProfile.MAX_NAME_LENGTH:
raise JsonableError(_("Name too long!"))
if len(full_name) < UserProfile.MIN_NAME_LENGTH:
raise JsonableError(_("Name too short!"))
for character in full_name:
if unicodedata.category(character)[0] == "C" or character in UserProfile.NAME_INVALID_CHARS:
raise JsonableError(_("Invalid characters in name!"))
# Names ending with e.g. `|15` could be ambiguous for
# sloppily-written parsers of our Markdown syntax for mentioning
# users with ambiguous names, and likely have no real use, so we
# ban them.
if re.search(r"\|\d+$", full_name_raw):
raise JsonableError(_("Invalid format!"))
return full_name
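# Illustrative behavior of check_full_name (a sketch; the example inputs are
# made up): a trailing "|<digits>" suffix is rejected because it collides
# with the @**Full Name|user_id** mention syntax, while other pipes pass.
#
#     check_full_name("Ada Lovelace")  -> "Ada Lovelace"
#     check_full_name("Ada|15")        -> raises JsonableError("Invalid format!")
#     check_full_name("A|B Testing")   -> "A|B Testing"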
# NOTE: We don't try to absolutely prevent 2 bots from having the same
# name (e.g. you can get there by reactivating a deactivated bot after
# making a new bot with the same name). This is just a check designed
# to make it unlikely to happen by accident.
def check_bot_name_available(realm_id: int, full_name: str) -> None:
dup_exists = UserProfile.objects.filter(
realm_id=realm_id,
full_name=full_name.strip(),
is_active=True,
).exists()
if dup_exists:
raise JsonableError(_("Name is already in use!"))
def check_short_name(short_name_raw: str) -> str:
short_name = short_name_raw.strip()
if len(short_name) == 0:
raise JsonableError(_("Bad name or username"))
return short_name
def check_valid_bot_config(bot_type: int, service_name: str, config_data: Dict[str, str]) -> None:
if bot_type == UserProfile.INCOMING_WEBHOOK_BOT:
from zerver.lib.integrations import WEBHOOK_INTEGRATIONS
config_options = None
for integration in WEBHOOK_INTEGRATIONS:
if integration.name == service_name:
# key: validator
config_options = {c[1]: c[2] for c in integration.config_options}
break
if not config_options:
raise JsonableError(_("Invalid integration '{}'.").format(service_name))
missing_keys = set(config_options.keys()) - set(config_data.keys())
if missing_keys:
raise JsonableError(
_("Missing configuration parameters: {}").format(
missing_keys,
)
)
for key, validator in config_options.items():
value = config_data[key]
error = validator(key, value)
if error:
raise JsonableError(_("Invalid {} value {} ({})").format(key, value, error))
elif bot_type == UserProfile.EMBEDDED_BOT:
try:
from zerver.lib.bot_lib import get_bot_handler
bot_handler = get_bot_handler(service_name)
if hasattr(bot_handler, "validate_config"):
bot_handler.validate_config(config_data)
except ConfigValidationError:
# The exception provides a specific error message, but that
            # message is not tagged as translatable, because it is
# triggered in the external zulip_bots package.
# TODO: Think of some clever way to provide a more specific
# error message.
raise JsonableError(_("Invalid configuration data!"))
# Adds an outgoing webhook or embedded bot service.
def add_service(
name: str,
user_profile: UserProfile,
base_url: Optional[str] = None,
interface: Optional[int] = None,
token: Optional[str] = None,
) -> None:
Service.objects.create(
name=name, user_profile=user_profile, base_url=base_url, interface=interface, token=token
)
def check_bot_creation_policy(user_profile: UserProfile, bot_type: int) -> None:
    # Realm administrators can always add bots
if user_profile.is_realm_admin:
return
if user_profile.realm.bot_creation_policy == Realm.BOT_CREATION_EVERYONE:
return
if user_profile.realm.bot_creation_policy == Realm.BOT_CREATION_ADMINS_ONLY:
raise OrganizationAdministratorRequired()
if (
user_profile.realm.bot_creation_policy == Realm.BOT_CREATION_LIMIT_GENERIC_BOTS
and bot_type == UserProfile.DEFAULT_BOT
):
raise OrganizationAdministratorRequired()
def check_valid_bot_type(user_profile: UserProfile, bot_type: int) -> None:
if bot_type not in user_profile.allowed_bot_types:
raise JsonableError(_("Invalid bot type"))
def check_valid_interface_type(interface_type: Optional[int]) -> None:
if interface_type not in Service.ALLOWED_INTERFACE_TYPES:
raise JsonableError(_("Invalid interface type"))
def is_administrator_role(role: int) -> bool:
return role in {UserProfile.ROLE_REALM_ADMINISTRATOR, UserProfile.ROLE_REALM_OWNER}
def bulk_get_users(
    emails: List[str], realm: Optional[Realm], base_query: Optional["QuerySet[UserProfile]"] = None
) -> Dict[str, UserProfile]:
if base_query is None:
assert realm is not None
query = UserProfile.objects.filter(realm=realm, is_active=True)
realm_id = realm.id
else:
# WARNING: Currently, this code path only really supports one
# version of `base_query` being used (because otherwise,
# they'll share the cache, which can screw up the filtering).
# If you're using this flow, you'll need to re-do any filters
# in base_query in the code itself; base_query is just a perf
# optimization.
query = base_query
realm_id = 0
def fetch_users_by_email(emails: List[str]) -> List[UserProfile]:
# This should be just
#
# UserProfile.objects.select_related("realm").filter(email__iexact__in=emails,
# realm=realm)
#
# But chaining __in and __iexact doesn't work with Django's
# ORM, so we have the following hack to construct the relevant where clause
where_clause = "upper(zerver_userprofile.email::text) IN (SELECT upper(email) FROM unnest(%s) AS email)"
return query.select_related("realm").extra(where=[where_clause], params=(emails,))
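    # A rough ORM-only sketch of the same case-insensitive match, shown for
    # comparison (assumes Django's Upper database function; upper-casing in
    # Python vs. in SQL can differ under some collations):
    #
    #     from django.db.models.functions import Upper
    #     query.annotate(email_upper=Upper("email")).filter(
    #         email_upper__in=[email.upper() for email in emails]
    #     )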
def user_to_email(user_profile: UserProfile) -> str:
return user_profile.email.lower()
return bulk_cached_fetch(
# Use a separate cache key to protect us from conflicts with
# the get_user cache.
lambda email: "bulk_get_users:" + user_profile_cache_key_id(email, realm_id),
fetch_users_by_email,
[email.lower() for email in emails],
id_fetcher=user_to_email,
)
def get_user_id(user: UserProfile) -> int:
return user.id
def user_ids_to_users(user_ids: Sequence[int], realm: Realm) -> List[UserProfile]:
# TODO: Consider adding a flag to control whether deactivated
# users should be included.
def fetch_users_by_id(user_ids: List[int]) -> List[UserProfile]:
return list(UserProfile.objects.filter(id__in=user_ids).select_related())
user_profiles_by_id: Dict[int, UserProfile] = bulk_cached_fetch(
cache_key_function=user_profile_by_id_cache_key,
query_function=fetch_users_by_id,
object_ids=user_ids,
id_fetcher=get_user_id,
)
found_user_ids = user_profiles_by_id.keys()
missed_user_ids = [user_id for user_id in user_ids if user_id not in found_user_ids]
if missed_user_ids:
raise JsonableError(_("Invalid user ID: {}").format(missed_user_ids[0]))
user_profiles = list(user_profiles_by_id.values())
for user_profile in user_profiles:
if user_profile.realm != realm:
raise JsonableError(_("Invalid user ID: {}").format(user_profile.id))
return user_profiles
def access_bot_by_id(user_profile: UserProfile, user_id: int) -> UserProfile:
try:
target = get_user_profile_by_id_in_realm(user_id, user_profile.realm)
except UserProfile.DoesNotExist:
raise JsonableError(_("No such bot"))
if not target.is_bot:
raise JsonableError(_("No such bot"))
if not user_profile.can_admin_user(target):
raise JsonableError(_("Insufficient permission"))
return target
def access_user_by_id(
user_profile: UserProfile,
target_user_id: int,
*,
allow_deactivated: bool = False,
allow_bots: bool = False,
for_admin: bool,
) -> UserProfile:
"""Master function for accessing another user by ID in API code;
verifies the user ID is in the same realm, and if requested checks
for administrative privileges, with flags for various special
cases.
"""
try:
target = get_user_profile_by_id_in_realm(target_user_id, user_profile.realm)
except UserProfile.DoesNotExist:
raise JsonableError(_("No such user"))
if target.is_bot and not allow_bots:
raise JsonableError(_("No such user"))
if not target.is_active and not allow_deactivated:
raise JsonableError(_("User is deactivated"))
if not for_admin:
# Administrative access is not required just to read a user.
return target
if not user_profile.can_admin_user(target):
raise JsonableError(_("Insufficient permission"))
return target
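# Illustrative call sites for access_user_by_id (the endpoint context is
# hypothetical):
#
#     # Deactivating a user requires administrative rights over the target:
#     target = access_user_by_id(user_profile, target_user_id, for_admin=True)
#
#     # Rendering a user's profile card for display does not:
#     target = access_user_by_id(user_profile, target_user_id, for_admin=False)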
class Accounts(TypedDict):
realm_name: str
realm_id: int
full_name: str
avatar: Optional[str]
def get_accounts_for_email(email: str) -> List[Accounts]:
profiles = (
UserProfile.objects.select_related("realm")
.filter(
delivery_email__iexact=email.strip(),
is_active=True,
realm__deactivated=False,
is_bot=False,
)
.order_by("date_joined")
)
accounts: List[Accounts] = []
for profile in profiles:
accounts.append(
dict(
realm_name=profile.realm.name,
realm_id=profile.realm.id,
full_name=profile.full_name,
avatar=avatar_url(profile),
)
)
return accounts
def get_api_key(user_profile: UserProfile) -> str:
return user_profile.api_key
def get_all_api_keys(user_profile: UserProfile) -> List[str]:
# Users can only have one API key for now
return [user_profile.api_key]
def validate_user_custom_profile_field(
realm_id: int, field: CustomProfileField, value: Union[int, str, List[int]]
) -> Union[int, str, List[int]]:
validators = CustomProfileField.FIELD_VALIDATORS
field_type = field.field_type
var_name = f"{field.name}"
if field_type in validators:
validator = validators[field_type]
return validator(var_name, value)
elif field_type == CustomProfileField.SELECT:
choice_field_validator = CustomProfileField.SELECT_FIELD_VALIDATORS[field_type]
field_data = field.field_data
# Put an assertion so that mypy doesn't complain.
assert field_data is not None
return choice_field_validator(var_name, field_data, value)
elif field_type == CustomProfileField.USER:
user_field_validator = CustomProfileField.USER_FIELD_VALIDATORS[field_type]
return user_field_validator(realm_id, value, False)
else:
raise AssertionError("Invalid field type")
def validate_user_custom_profile_data(
realm_id: int, profile_data: List[Dict[str, Union[int, str, List[int]]]]
) -> None:
    # This function validates all custom field values according to their field types.
for item in profile_data:
field_id = item["id"]
try:
field = CustomProfileField.objects.get(id=field_id)
except CustomProfileField.DoesNotExist:
raise JsonableError(_("Field id {id} not found.").format(id=field_id))
try:
validate_user_custom_profile_field(realm_id, field, item["value"])
except ValidationError as error:
raise JsonableError(error.message)
def can_access_delivery_email(user_profile: UserProfile) -> bool:
realm = user_profile.realm
if realm.email_address_visibility == Realm.EMAIL_ADDRESS_VISIBILITY_ADMINS:
return user_profile.is_realm_admin
if realm.email_address_visibility == Realm.EMAIL_ADDRESS_VISIBILITY_MODERATORS:
return user_profile.is_realm_admin or user_profile.is_moderator
return False
def format_user_row(
realm: Realm,
acting_user: Optional[UserProfile],
row: Dict[str, Any],
client_gravatar: bool,
user_avatar_url_field_optional: bool,
custom_profile_field_data: Optional[Dict[str, Any]] = None,
) -> Dict[str, Any]:
"""Formats a user row returned by a database fetch using
.values(*realm_user_dict_fields) into a dictionary representation
of that user for API delivery to clients. The acting_user
argument is used for permissions checks.
"""
is_admin = is_administrator_role(row["role"])
is_owner = row["role"] == UserProfile.ROLE_REALM_OWNER
is_guest = row["role"] == UserProfile.ROLE_GUEST
is_bot = row["is_bot"]
result = dict(
email=row["email"],
user_id=row["id"],
avatar_version=row["avatar_version"],
is_admin=is_admin,
is_owner=is_owner,
is_guest=is_guest,
is_billing_admin=row["is_billing_admin"],
role=row["role"],
is_bot=is_bot,
full_name=row["full_name"],
timezone=canonicalize_timezone(row["timezone"]),
is_active=row["is_active"],
date_joined=row["date_joined"].isoformat(),
)
# Zulip clients that support using `GET /avatar/{user_id}` as a
# fallback if we didn't send an avatar URL in the user object pass
# user_avatar_url_field_optional in client_capabilities.
#
# This is a major network performance optimization for
# organizations with 10,000s of users where we would otherwise
# send avatar URLs in the payload (either because most users have
# uploaded avatars or because EMAIL_ADDRESS_VISIBILITY_ADMINS
# prevents the older client_gravatar optimization from helping).
    # The performance impact is large, largely because the hashes in
# avatar URLs structurally cannot compress well.
#
# The user_avatar_url_field_optional gives the server sole
# discretion in deciding for which users we want to send the
    # avatar URL (which saves clients an RTT at the cost of some
# bandwidth). At present, the server looks at `long_term_idle` to
# decide which users to include avatars for, piggy-backing on a
# different optimization for organizations with 10,000s of users.
include_avatar_url = not user_avatar_url_field_optional or not row["long_term_idle"]
if include_avatar_url:
result["avatar_url"] = get_avatar_field(
user_id=row["id"],
realm_id=realm.id,
email=row["delivery_email"],
avatar_source=row["avatar_source"],
avatar_version=row["avatar_version"],
medium=False,
client_gravatar=client_gravatar,
)
if acting_user is not None and can_access_delivery_email(acting_user):
result["delivery_email"] = row["delivery_email"]
if is_bot:
result["bot_type"] = row["bot_type"]
if row["email"] in settings.CROSS_REALM_BOT_EMAILS:
result["is_cross_realm_bot"] = True
# Note that bot_owner_id can be None with legacy data.
result["bot_owner_id"] = row["bot_owner_id"]
elif custom_profile_field_data is not None:
result["profile_data"] = custom_profile_field_data
return result
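# Sketch of the client-side half of the optimization above (the function and
# parameter names are hypothetical, not part of any Zulip client library):
#
#     def client_avatar_url(realm_url: str, user: Dict[str, Any]) -> str:
#         # Use the server-provided URL when present; otherwise fall back
#         # to the redirecting endpoint, spending an extra round trip.
#         if user.get("avatar_url") is not None:
#             return user["avatar_url"]
#         return f"{realm_url}/avatar/{user['user_id']}"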
def user_profile_to_user_row(user_profile: UserProfile) -> Dict[str, Any]:
# What we're trying to do is simulate the user_profile having been
# fetched from a QuerySet using `.values(*realm_user_dict_fields)`
# even though we fetched UserProfile objects. This is messier
# than it seems.
#
# What we'd like to do is just call model_to_dict(user,
# fields=realm_user_dict_fields). The problem with this is
# that model_to_dict has a different convention than
# `.values()` in its handling of foreign keys, naming them as
# e.g. `bot_owner`, not `bot_owner_id`; we work around that
# here.
#
# This could be potentially simplified in the future by
# changing realm_user_dict_fields to name the bot owner with
# the less readable `bot_owner` (instead of `bot_owner_id`).
user_row = model_to_dict(user_profile, fields=[*realm_user_dict_fields, "bot_owner"])
user_row["bot_owner_id"] = user_row["bot_owner"]
del user_row["bot_owner"]
return user_row
def get_cross_realm_dicts() -> List[Dict[str, Any]]:
users = bulk_get_users(
list(settings.CROSS_REALM_BOT_EMAILS),
None,
base_query=UserProfile.objects.filter(realm__string_id=settings.SYSTEM_BOT_REALM),
).values()
result = []
for user in users:
        # Important: We filter here, in addition to in
# `base_query`, because of how bulk_get_users shares its
# cache with other UserProfile caches.
if user.realm.string_id != settings.SYSTEM_BOT_REALM: # nocoverage
continue
user_row = user_profile_to_user_row(user)
        # Because we want to avoid clients being exposed to the
# implementation detail that these bots are self-owned, we
# just set bot_owner_id=None.
user_row["bot_owner_id"] = None
result.append(
format_user_row(
user.realm,
acting_user=user,
row=user_row,
client_gravatar=False,
user_avatar_url_field_optional=False,
custom_profile_field_data=None,
)
)
return result
def get_custom_profile_field_values(
custom_profile_field_values: List[CustomProfileFieldValue],
) -> Dict[int, Dict[str, Any]]:
profiles_by_user_id: Dict[int, Dict[str, Any]] = defaultdict(dict)
for profile_field in custom_profile_field_values:
user_id = profile_field.user_profile_id
if profile_field.field.is_renderable():
profiles_by_user_id[user_id][str(profile_field.field_id)] = {
"value": profile_field.value,
"rendered_value": profile_field.rendered_value,
}
else:
profiles_by_user_id[user_id][str(profile_field.field_id)] = {
"value": profile_field.value,
}
return profiles_by_user_id
def get_raw_user_data(
realm: Realm,
acting_user: Optional[UserProfile],
*,
target_user: Optional[UserProfile] = None,
client_gravatar: bool,
user_avatar_url_field_optional: bool,
include_custom_profile_fields: bool = True,
) -> Dict[int, Dict[str, str]]:
"""Fetches data about the target user(s) appropriate for sending to
acting_user via the standard format for the Zulip API. If
target_user is None, we fetch all users in the realm.
"""
profiles_by_user_id = None
custom_profile_field_data = None
# target_user is an optional parameter which is passed when user data of a specific user
# is required. It is 'None' otherwise.
if target_user is not None:
user_dicts = [user_profile_to_user_row(target_user)]
else:
user_dicts = get_realm_user_dicts(realm.id)
if include_custom_profile_fields:
base_query = CustomProfileFieldValue.objects.select_related("field")
# TODO: Consider optimizing this query away with caching.
if target_user is not None:
custom_profile_field_values = base_query.filter(user_profile=target_user)
else:
custom_profile_field_values = base_query.filter(field__realm_id=realm.id)
profiles_by_user_id = get_custom_profile_field_values(custom_profile_field_values)
result = {}
for row in user_dicts:
if profiles_by_user_id is not None:
custom_profile_field_data = profiles_by_user_id.get(row["id"], {})
result[row["id"]] = format_user_row(
realm,
acting_user=acting_user,
row=row,
client_gravatar=client_gravatar,
user_avatar_url_field_optional=user_avatar_url_field_optional,
custom_profile_field_data=custom_profile_field_data,
)
return result
| punchagan/zulip | zerver/lib/users.py | Python | apache-2.0 | 21,312 |
<!DOCTYPE HTML>
<html>
<head>
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<title>Test Page for sap.m.CheckBox</title>
<script src="../shared-config.js"></script>
<script id="sap-ui-bootstrap" data-sap-ui-noConflict="true"
data-sap-ui-libs="sap.m" src="../../../../resources/sap-ui-core.js">
</script>
<link rel="stylesheet" href="../../../../resources/sap/ui/thirdparty/qunit.css" type="text/css" media="screen">
<script src="../../../../resources/sap/ui/thirdparty/qunit.js"></script>
<script src="../../../../resources/sap/ui/qunit/qunit-junit.js"></script>
<script src="../../../../resources/sap/ui/qunit/QUnitUtils.js"></script>
<script src="../../../../resources/sap/ui/thirdparty/sinon.js"></script>
<script src="../../../../resources/sap/ui/thirdparty/sinon-qunit.js"></script>
<script>
jQuery.sap.require("sap.m.CheckBox");
jQuery.sap.require("sap.ui.core.ValueState");
QUnit.module("Properties");
/* --------------------------------------- */
/* Test: Default Values */
/* --------------------------------------- */
QUnit.test("Default Values", function(assert) {
var bEnabled = true;
var bEditable = true;
var bVisible = true;
var bSelected = false;
var sName = "";
var sText = "";
var sTextDirection = sap.ui.core.TextDirection.Inherit;
var sWidth = "";
// system under test
var oCheckBox = new sap.m.CheckBox();
// arrange
oCheckBox.placeAt("content");
sap.ui.getCore().applyChanges();
// assertions
assert.strictEqual(oCheckBox.getEnabled(), bEnabled, "Property 'enabled': Default value should be '" + bEnabled + "'");
assert.strictEqual(oCheckBox.getEditable(), bEditable, "Property 'editable': Default value should be '" + bEditable + "'");
assert.strictEqual(oCheckBox.getVisible(), bVisible, "Property 'visible': Default value should be '" + bVisible + "'");
assert.strictEqual(oCheckBox.getSelected(), bSelected, "Property 'selected': Default value should be '" + bSelected + "'");
assert.strictEqual(oCheckBox.getName(), sName, "Property 'name': Default value should be '" + sName + "'");
assert.strictEqual(oCheckBox.getText(), sText, "Property 'text': Default value should be '" + sText + "'");
assert.strictEqual(oCheckBox.getTextDirection(), sTextDirection, "Property 'textDirection': Default value should be '" + sTextDirection + "'");
assert.strictEqual(oCheckBox.getWidth(), sWidth, "Property 'width': Default value should be '" + sWidth + "'");
// cleanup
oCheckBox.destroy();
});
/* ----------------------------------------------- */
/* Test: 'visible=true' */
/* ----------------------------------------------- */
QUnit.test("'visible=true'", function(assert) {
// system under test
var oCheckBox = new sap.m.CheckBox({visible: true});
// arrange
oCheckBox.placeAt("content");
sap.ui.getCore().applyChanges();
// assertions
assert.ok(oCheckBox.getDomRef(), "visible=true: CheckBox should have been rendered");
// cleanup
oCheckBox.destroy();
});
/* ----------------------------------------------- */
/* Test: 'visible=false' */
/* ----------------------------------------------- */
QUnit.test("'visible=false'", function(assert) {
// system under test
var oCheckBox = new sap.m.CheckBox({visible: false});
// arrange
oCheckBox.placeAt("content");
sap.ui.getCore().applyChanges();
// assertions
assert.ok(!oCheckBox.getDomRef(), "visible=false: CheckBox should not have been rendered");
// cleanup
oCheckBox.destroy();
});
/* ----------------------------------------------- */
/* Test: 'enabled=true' */
/* ----------------------------------------------- */
QUnit.test("'enabled=true'", function(assert) {
// system under test
var bEnabled = true;
var oCheckBox = new sap.m.CheckBox({enabled: bEnabled});
// arrange
oCheckBox.placeAt("content");
sap.ui.getCore().applyChanges();
// assertions
assert.ok(!oCheckBox.$("CbBg").hasClass("sapMCbBgDis"), "enabled=" + bEnabled + ": CheckBox should not have class sapMCbBgDis");
assert.strictEqual(oCheckBox.$("CB").attr("disabled"), undefined, "enabled=" + bEnabled + ": CheckBox should not have attribute 'disabled'");
var iTabindex = oCheckBox.getTabIndex();
assert.strictEqual(oCheckBox.$().attr("tabindex"), iTabindex.toString() , "enabled=" + bEnabled + ": CheckBox should have attribute 'tabindex=" + iTabindex +"'");
// cleanup
oCheckBox.destroy();
});
/* ----------------------------------------------- */
/* Test: 'enabled=false' */
/* ----------------------------------------------- */
QUnit.test("'enabled=false'", function(assert) {
// system under test
var bEnabled = false;
var oCheckBox = new sap.m.CheckBox({enabled: bEnabled});
// arrange
oCheckBox.placeAt("content");
sap.ui.getCore().applyChanges();
// assertions
assert.ok(!oCheckBox.$().hasClass("sapMPointer"), "enabled=" + bEnabled + ": CheckBox should not have class sapMPointer");
assert.ok(oCheckBox.$().hasClass("sapMCbBgDis"), "enabled=" + bEnabled + ": CheckBox should have class sapMCbBgDis");
assert.strictEqual(oCheckBox.$("CB").attr("disabled"), "disabled", "enabled=" + bEnabled + ": CheckBox should have attribute 'disabled=disabled'");
assert.strictEqual(oCheckBox.$().attr("aria-disabled"), "true", "Property 'aria-disabled' should be 'true'");
// cleanup
oCheckBox.destroy();
});
/* ----------------------------------------------- */
/* Test: 'editable=false' */
/* ----------------------------------------------- */
QUnit.test("'editable=false'", function(assert) {
// system under test
var bEditable = false;
var oCheckBox = new sap.m.CheckBox({editable: bEditable});
// arrange
oCheckBox.placeAt("content");
sap.ui.getCore().applyChanges();
// assertions
assert.strictEqual(oCheckBox.getTabIndex(), 0 , "'getTabindex' should return 0");
assert.equal(oCheckBox.$().hasClass("sapMCbRo"), true, ": The CheckBox should have class sapMCbRo");
assert.strictEqual(oCheckBox.$("CB").attr("readonly"), "readonly", "The Checkbox should have attribute 'readonly=readonly'");
assert.strictEqual(oCheckBox.$().attr("aria-readonly"), "true", "Property 'aria-readonly' should be 'true'");
// cleanup
oCheckBox.destroy();
});
/* ----------------------------------------------- */
/* Test: 'selected=true' */
/* ----------------------------------------------- */
QUnit.test("'selected=true'", function(assert) {
// system under test
var oCheckBox = new sap.m.CheckBox({selected: true});
// arrange
oCheckBox.placeAt("content");
sap.ui.getCore().applyChanges();
// assertions
assert.ok(oCheckBox.$("CbBg").hasClass("sapMCbMarkChecked"), "selected=true: CheckBox should have class sapMCbMarkChecked");
assert.ok(oCheckBox.$("CB").is(":checked"), "selected=false: CheckBox should have attribute 'checked'");
assert.strictEqual(oCheckBox.$().attr("aria-checked"), "true", "Property 'aria-checked': Default value should be 'true'");
// cleanup
oCheckBox.destroy();
});
/* ----------------------------------------------- */
/* Test: 'selected=false' */
/* ----------------------------------------------- */
QUnit.test("'selected=false'", function(assert) {
// system under test
var oCheckBox = new sap.m.CheckBox({selected: false});
// arrange
oCheckBox.placeAt("content");
sap.ui.getCore().applyChanges();
// assertions
assert.ok(!oCheckBox.$("CbBg").hasClass("sapMCbMarkChecked"), "selected=false: CheckBox should not have class sapMCbMarkChecked");
assert.ok(!oCheckBox.$("CB").is(":checked"), "selected=false: CheckBox should not have attribute 'checked'");
assert.strictEqual(oCheckBox.$().attr("aria-checked"), "false", "Property 'aria-checked': Default value should be 'false'");
// cleanup
oCheckBox.destroy();
});
/* ----------------------------------------------- */
/* Test: 'ValueState=Error' */
/* ----------------------------------------------- */
QUnit.test("'ValueState=Error'", function(assert) {
// system under test
var oCheckBox = new sap.m.CheckBox({valueState: sap.ui.core.ValueState.Error});
// arrange
oCheckBox.placeAt("content");
sap.ui.getCore().applyChanges();
// assertions
assert.ok(oCheckBox.$().hasClass("sapMCbErr"), "The CheckBox has value state error css class.");
// cleanup
oCheckBox.destroy();
});
/* ----------------------------------------------- */
/* Test: 'ValueState=Warning' */
/* ----------------------------------------------- */
QUnit.test("'ValueState=Warning'", function(assert) {
// system under test
var oCheckBox = new sap.m.CheckBox({valueState: sap.ui.core.ValueState.Warning});
// arrange
oCheckBox.placeAt("content");
sap.ui.getCore().applyChanges();
// assertions
assert.ok(oCheckBox.$().hasClass("sapMCbWarn"), "The CheckBox has value state warning css class.");
// cleanup
oCheckBox.destroy();
});
/* ----------------------------------------------- */
/* Test: 'name' */
/* ----------------------------------------------- */
QUnit.test("'name'", function(assert) {
var sName = "my Name";
// system under test
var oCheckBox = new sap.m.CheckBox({name: sName});
// arrange
oCheckBox.placeAt("content");
sap.ui.getCore().applyChanges();
// assertions
assert.strictEqual(oCheckBox.$("CB").attr("name"), sName, "Property 'name=" + sName + "': CheckBox input element should have attribute 'name=" + sName + "'");
// cleanup
oCheckBox.destroy();
});
/* ----------------------------------------------- */
/* Test: getTabIndex (enabled=true) */
/* ----------------------------------------------- */
QUnit.test("'getTabIndex (enabled=true)'", function(assert) {
// system under test
var oCheckBox = new sap.m.CheckBox({enabled: true});
// assertions
assert.strictEqual(oCheckBox.getTabIndex(), 0 , "'getTabindex' should return 0");
// cleanup
oCheckBox.destroy();
});
/* ----------------------------------------------- */
/* Test: getTabIndex (enabled=false) */
/* ----------------------------------------------- */
QUnit.test("'getTabIndex (enabled=false)'", function(assert) {
// system under test
var oCheckBox = new sap.m.CheckBox({enabled: false});
// assertions
assert.strictEqual(oCheckBox.getTabIndex(), -1 , "'getTabindex' should return -1");
// cleanup
oCheckBox.destroy();
});
/* ----------------------------------------------------------------------- */
/* Test: getTabIndex (tabIndex previously set explicitly via setTabIndex) */
/* ----------------------------------------------------------------------- */
QUnit.test("'getTabIndex (tabIndex previously set explicitely via setTabIndex)'", function(assert) {
// system under test
var oCheckBox = new sap.m.CheckBox();
// arrange
oCheckBox.setTabIndex(2);
// assertions
assert.strictEqual(oCheckBox.getTabIndex(), 2 , "'getTabindex' should return 2");
// cleanup
oCheckBox.destroy();
});
/* ----------------------------------------------- */
/* Test: 'tabIndex' */
/* ----------------------------------------------- */
QUnit.test("'tabIndex'", function(assert) {
var iTabIndex = 2;
// system under test
var oCheckBox = new sap.m.CheckBox();
// arrange
oCheckBox.placeAt("content");
oCheckBox.setTabIndex(iTabIndex);
sap.ui.getCore().applyChanges();
// assertions
assert.strictEqual(oCheckBox.$().attr("tabindex"), iTabIndex.toString() , "Property 'tabIndex=" + iTabIndex + "': CheckBox should have attribute 'tabindex=" + iTabIndex + "'");
// cleanup
oCheckBox.destroy();
});
/* ----------------------------------------------- */
/* Test: testSetLabelProperty */
/* ----------------------------------------------- */
function testSetLabelProperty(property, value, mode) {
var sPropertyCamelCase = property[0].toUpperCase() + property.slice(1);
var sSetterMethod = "set" + sPropertyCamelCase;
var oSpy = sinon.spy(sap.m.Label.prototype, sSetterMethod);
// system under test
switch (mode) {
case "Constructor":
// set property via constructor
var args = {};
args[property] = value;
var oCheckBox = new sap.m.CheckBox(args);
break;
case "Setter":
// set property via setter method
var oCheckBox = new sap.m.CheckBox();
oCheckBox[sSetterMethod](value);
break;
default: console.error("testSetLabelProperty: wrong argument for parameter 'mode'");
}
// arrange
oCheckBox.placeAt("content");
sap.ui.getCore().applyChanges();
// assertions
assert.strictEqual(oSpy.lastCall.args[0], value, "Property '" + property + "=" + value + "': Corresponding setter method of label control should have been called accordingly");
// cleanup
oCheckBox.destroy();
sap.m.Label.prototype[sSetterMethod].restore();
}
QUnit.test("Should render the text of a Checkbox after rendering the checkbox without setting label properties", function(assert) {
// Arrange
var oCheckBox = new sap.m.CheckBox();
// System under Test
oCheckBox.placeAt("content");
sap.ui.getCore().applyChanges();
// Act
oCheckBox.setText("foo");
sap.ui.getCore().applyChanges();
// Assert
assert.ok(oCheckBox.$("label").length);
// Cleanup
oCheckBox.destroy();
});
/* ----------------------------------------------- */
/* Test: 'text' - via Constructor */
/* ----------------------------------------------- */
QUnit.test("'text' - via Constructor", function(assert) {
testSetLabelProperty("text", "my Text", "Constructor");
});
/* ----------------------------------------------- */
/* Test: 'text' - via Setter Method */
/* ----------------------------------------------- */
QUnit.test("'text' - via Setter Method", function(assert) {
testSetLabelProperty("text", "my Text", "Setter");
});
/* ----------------------------------------------- */
/* Test: 'textDirection' - via Constructor */
/* ----------------------------------------------- */
QUnit.test("'textDirection' - via Constructor", function(assert) {
testSetLabelProperty("textDirection", "RTL", "Constructor");
testSetLabelProperty("textDirection", "LTR", "Constructor");
testSetLabelProperty("textDirection", "Inherit", "Constructor");
});
/* ----------------------------------------------- */
/* Test: 'textDirection' - via Setter Method */
/* ----------------------------------------------- */
QUnit.test("'textDirection' - via Setter Method", function(assert) {
testSetLabelProperty("textDirection", "RTL", "Setter");
testSetLabelProperty("textDirection", "LTR", "Setter");
testSetLabelProperty("textDirection", "Inherit", "Setter");
});
/* ----------------------------------------------- */
/* Test: 'textAlign' - via Constructor */
/* ----------------------------------------------- */
QUnit.test("'textAlign' - via Constructor", function(assert) {
testSetLabelProperty("textAlign", "Begin", "Constructor");
testSetLabelProperty("textAlign", "End", "Constructor");
testSetLabelProperty("textAlign", "Left", "Constructor");
testSetLabelProperty("textAlign", "Right", "Constructor");
testSetLabelProperty("textAlign", "Center", "Constructor");
testSetLabelProperty("textAlign", "Initial", "Constructor");
});
/* ----------------------------------------------- */
/* Test: 'textAlign' - via Setter Method */
/* ----------------------------------------------- */
QUnit.test("'textAlign' - via Setter Method", function(assert) {
testSetLabelProperty("textAlign", "Begin", "Setter");
testSetLabelProperty("textAlign", "End", "Setter");
testSetLabelProperty("textAlign", "Left", "Setter");
testSetLabelProperty("textAlign", "Right", "Setter");
testSetLabelProperty("textAlign", "Center", "Setter");
testSetLabelProperty("textAlign", "Initial", "Setter");
});
/* ----------------------------------------------- */
/* Test: 'width' - via Constructor */
/* ----------------------------------------------- */
QUnit.test("'width' - via Constructor", function(assert) {
testSetLabelProperty("width", "100px", "Constructor");
});
/* ----------------------------------------------- */
/* Test: 'width' - via Setter Method */
/* ----------------------------------------------- */
QUnit.test("'width' - via Setter Method", function(assert) {
testSetLabelProperty("width", "100px", "Setter");
});
QUnit.module("Basic CSS classes");
/* ----------------------------------------------- */
/* Test: Existence */
/* ----------------------------------------------- */
QUnit.test("Existence", function(assert) {
// system under test
var oCheckBox = new sap.m.CheckBox();
// arrange
oCheckBox.placeAt("content");
sap.ui.getCore().applyChanges();
// assertions
assert.ok(oCheckBox.$().hasClass("sapMCb"), "CheckBox should have class sapMCb");
assert.ok(oCheckBox.$("CbBg").hasClass("sapMCbBg"), "CheckBox should have class sapMCbBg");
// cleanup
oCheckBox.destroy();
});
/* ----------------------------------------------- */
/* function: sapMCbHoverable */
/* ----------------------------------------------- */
function testSapMCbHoverable(oThat, bDesktop, sMessage) {
var stub = oThat.stub(sap.ui.Device, "system", {desktop : bDesktop});
// system under test
var oCheckBox = new sap.m.CheckBox();
// arrange
oCheckBox.placeAt("content");
sap.ui.getCore().applyChanges();
// assertions
if (bDesktop){
assert.ok(oCheckBox.$("CbBg").hasClass("sapMCbHoverable"), sMessage);
} else {
assert.ok(!oCheckBox.$("CbBg").hasClass("sapMCbHoverable"), sMessage);
}
// cleanup
oCheckBox.destroy();
}
/* ----------------------------------------------- */
/* Test: sapMCbHoverable (non-desktop environment) */
/* ----------------------------------------------- */
QUnit.test("sapMCbHoverable (non-desktop environment)", function(assert) {
testSapMCbHoverable(this, false, "CheckBox should not have class sapMCbHoverable");
});
/* ----------------------------------------------- */
/* Test: sapMCbHoverable (desktop environment) */
/* ----------------------------------------------- */
QUnit.test("sapMCbHoverable (desktop environment)", function(assert) {
testSapMCbHoverable(this, true, "CheckBox should have class sapMCbHoverable");
});
QUnit.module("Events");
/* ----------------------------------------------- */
/* Test: tap */
/* ----------------------------------------------- */
QUnit.test("tap", function(assert) {
// system under test
var oCheckBox = new sap.m.CheckBox();
var oSpy = this.spy();
oCheckBox.attachSelect(oSpy);
// arrange
oCheckBox.placeAt("content");
sap.ui.getCore().applyChanges();
// assertions
assert.equal(oCheckBox.getSelected(), false, "CheckBox should not be selected");
assert.strictEqual(oCheckBox.$().attr("aria-checked"), "false", "Property 'aria-checked': Default value should be 'false'");
qutils.triggerEvent("tap", oCheckBox.getId());
assert.ok(oSpy.calledOnce, "Event 'select' should have been fired");
assert.equal(oCheckBox.getSelected(), true, "CheckBox should be selected");
assert.strictEqual(oCheckBox.$().attr("aria-checked"), "true", "Property 'aria-checked': Default value should be 'true'");
qutils.triggerEvent("tap", oCheckBox.getId());
assert.ok(oSpy.calledTwice, "Event 'select' should have been fired");
assert.equal(oCheckBox.getSelected(), false, "CheckBox should not be selected");
oCheckBox.setEditable(false);
qutils.triggerEvent("tap", oCheckBox.getId());
assert.ok(oSpy.calledTwice, "Event 'select' should not have been fired again");
assert.equal(oCheckBox.getSelected(), false, "CheckBox should not be selected");
// cleanup
oCheckBox.destroy();
});
/* ----------------------------------------------- */
/* Test: SPACE key */
/* ----------------------------------------------- */
function testSpaceKey(sTestName, oOptions) {
QUnit.test(sTestName, function(assert) {
//Arrange
var oSpy = this.spy();
var oCheckBox = new sap.m.CheckBox({select : oSpy, selected : oOptions.selected});
// System under Test
oCheckBox.placeAt("qunit-fixture");
sap.ui.getCore().applyChanges();
oCheckBox.$().focus(); // set focus on checkbox
sap.ui.test.qunit.triggerKeydown(oCheckBox.$(), jQuery.sap.KeyCodes.SPACE); // trigger Space on checkbox
assert.strictEqual(oSpy.callCount, 1, "SPACE is pressed, select event was fired");
assert.equal(oCheckBox.getSelected(), oOptions.expectedSelection, oOptions.expectedMessage);
assert.strictEqual(oCheckBox.$().attr("aria-checked"), "" + oOptions.expectedSelection, oOptions.expectedMessageAria);
// Clean up
oCheckBox.destroy();
});
}
testSpaceKey("Press Space on not selected checkBox", {
selected : false,
expectedSelection : true,
expectedMessage: "CheckBox should be selected",
expectedMessageAria: "Property 'aria-checked' should be 'true'"
});
testSpaceKey("Press Space on selected checkBox", {
selected : true,
expectedSelection : false,
expectedMessage: "CheckBox should be deselected",
expectedMessageAria: "Property 'aria-checked' should be 'false'"
});
/* ----------------------------------------------- */
/* Test: ENTER key */
/* ----------------------------------------------- */
function testEnterKey(sTestName, oOptions) {
QUnit.test(sTestName, function(assert) {
//Arrange
var oSpy = this.spy();
var oCheckBox = new sap.m.CheckBox({select : oSpy, selected : oOptions.selected});
// System under Test
oCheckBox.placeAt("qunit-fixture");
sap.ui.getCore().applyChanges();
oCheckBox.$().focus(); // set focus on checkbox
sap.ui.test.qunit.triggerKeydown(oCheckBox.$(), jQuery.sap.KeyCodes.ENTER); // trigger Enter on checkbox
assert.strictEqual(oSpy.callCount, 1, "Enter is pressed, select event was fired");
assert.equal(oCheckBox.getSelected(), oOptions.expectedSelection, oOptions.expectedMessage);
assert.strictEqual(oCheckBox.$().attr("aria-checked"), "" + oOptions.expectedSelection, oOptions.expectedMessageAria);
// Clean up
oCheckBox.destroy();
});
}
testEnterKey("Press Enter on not selected checkBox", {
selected : false,
expectedSelection : true,
expectedMessage: "CheckBox should be selected",
expectedMessageAria: "Property 'aria-checked' should be 'true'"
});
testEnterKey("Press Enter on selected checkBox", {
selected : true,
expectedSelection : false,
expectedMessage: "CheckBox should be deselected",
expectedMessageAria: "Property 'aria-checked' should be 'false'"
});
QUnit.module("Accessibility");
QUnit.test("getAccessibilityInfo", function(assert) {
var oControl = new sap.m.CheckBox({text: "Text"});
assert.ok(!!oControl.getAccessibilityInfo, "CheckBox has a getAccessibilityInfo function");
var oInfo = oControl.getAccessibilityInfo();
assert.ok(!!oInfo, "getAccessibilityInfo returns a info object");
assert.strictEqual(oInfo.role, "checkbox", "AriaRole");
assert.strictEqual(oInfo.type, sap.ui.getCore().getLibraryResourceBundle("sap.m").getText("ACC_CTR_TYPE_CHECKBOX"), "Type");
assert.strictEqual(oInfo.description, "Text", "Description");
assert.strictEqual(oInfo.focusable, true, "Focusable");
assert.strictEqual(oInfo.enabled, true, "Enabled");
assert.strictEqual(oInfo.editable, true, "Editable");
oControl.setSelected(true);
oControl.setEnabled(false);
oControl.setEditable(false);
oInfo = oControl.getAccessibilityInfo();
assert.strictEqual(oInfo.description, "Text " + sap.ui.getCore().getLibraryResourceBundle("sap.m").getText("ACC_CTR_STATE_CHECKED"), "Description");
assert.strictEqual(oInfo.focusable, false, "Focusable");
assert.strictEqual(oInfo.enabled, false, "Enabled");
assert.strictEqual(oInfo.editable, false, "Editable");
oControl.destroy();
});
</script>
</head>
<body id="body" class="sapUiBody">
<h1 id="qunit-header">QUnit Page for sap.m.CheckBox</h1>
<h2 id="qunit-banner"></h2>
<h2 id="qunit-userAgent"></h2>
<div id="qunit-testrunner-toolbar"></div>
<ol id="qunit-tests"></ol>
<div id="content"></div>
<div id="qunit-fixture"></div>
</body>
</html>
| olirogers/openui5 | src/sap.m/test/sap/m/qunit/CheckBox.qunit.html | HTML | apache-2.0 | 24,667 |
<!DOCTYPE html >
<html>
<head>
<title>SeeStackDepthException - ScalaTest 3.0.2 - org.scalatest.events.SeeStackDepthException</title>
<meta name="description" content="SeeStackDepthException - ScalaTest 3.0.2 - org.scalatest.events.SeeStackDepthException" />
<meta name="keywords" content="SeeStackDepthException ScalaTest 3.0.2 org.scalatest.events.SeeStackDepthException" />
<meta http-equiv="content-type" content="text/html; charset=UTF-8" />
<link href="../../../lib/template.css" media="screen" type="text/css" rel="stylesheet" />
<link href="../../../lib/diagrams.css" media="screen" type="text/css" rel="stylesheet" id="diagrams-css" />
<script type="text/javascript" src="../../../lib/jquery.js" id="jquery-js"></script>
<script type="text/javascript" src="../../../lib/jquery-ui.js"></script>
<script type="text/javascript" src="../../../lib/template.js"></script>
<script type="text/javascript" src="../../../lib/tools.tooltip.js"></script>
<script type="text/javascript">
if(top === self) {
var url = '../../../index.html';
var hash = 'org.scalatest.events.SeeStackDepthException$';
var anchor = window.location.hash;
var anchor_opt = '';
if (anchor.length >= 1)
anchor_opt = '@' + anchor.substring(1);
window.location.href = url + '#' + hash + anchor_opt;
}
</script>
</head>
<body class="value">
<!-- Top of doc.scalatest.org [javascript] -->
<script type="text/javascript">
var rnd = window.rnd || Math.floor(Math.random()*10e6);
var pid204546 = window.pid204546 || rnd;
var plc204546 = window.plc204546 || 0;
var abkw = window.abkw || '';
var absrc = 'http://ab167933.adbutler-ikon.com/adserve/;ID=167933;size=468x60;setID=204546;type=js;sw='+screen.width+';sh='+screen.height+';spr='+window.devicePixelRatio+';kw='+abkw+';pid='+pid204546+';place='+(plc204546++)+';rnd='+rnd+';click=CLICK_MACRO_PLACEHOLDER';
document.write('<scr'+'ipt src="'+absrc+'" type="text/javascript"></scr'+'ipt>');
</script>
<div id="definition">
<img alt="Object" src="../../../lib/object_big.png" />
<p id="owner"><a href="../../package.html" class="extype" name="org">org</a>.<a href="../package.html" class="extype" name="org.scalatest">scalatest</a>.<a href="package.html" class="extype" name="org.scalatest.events">events</a></p>
<h1>SeeStackDepthException</h1><h3><span class="morelinks"><div>Related Doc:
<a href="package.html" class="extype" name="org.scalatest.events">package events</a>
</div></span></h3><span class="permalink">
<a href="../../../index.html#org.scalatest.events.SeeStackDepthException$" title="Permalink" target="_top">
<img src="../../../lib/permalink.png" alt="Permalink" />
</a>
</span>
</div>
<h4 id="signature" class="signature">
<span class="modifier_kind">
<span class="modifier"></span>
<span class="kind">object</span>
</span>
<span class="symbol">
<span class="name">SeeStackDepthException</span><span class="result"> extends <a href="Location.html" class="extype" name="org.scalatest.events.Location">Location</a> with <span class="extype" name="scala.Product">Product</span> with <span class="extype" name="scala.Serializable">Serializable</span></span>
</span>
</h4>
<div id="comment" class="fullcommenttop"><div class="comment cmt"><p>Indicates the location should be taken from the stack depth exception, included elsewhere in
the event that contained this location.
</p></div><dl class="attributes block"> <dt>Source</dt><dd><a href="https://github.com/scalatest/scalatest/tree/release-3.0.2/scalatest//src/main/scala/org/scalatest/events/Location.scala" target="_blank">Location.scala</a></dd></dl><div class="toggleContainer block">
<span class="toggle">Linear Supertypes</span>
<div class="superTypes hiddenContent"><a href="Location.html" class="extype" name="org.scalatest.events.Location">Location</a>, <span class="extype" name="scala.Serializable">Serializable</span>, <span class="extype" name="java.io.Serializable">Serializable</span>, <span class="extype" name="scala.Product">Product</span>, <span class="extype" name="scala.Equals">Equals</span>, <span class="extype" name="scala.AnyRef">AnyRef</span>, <span class="extype" name="scala.Any">Any</span></div>
</div></div>
<div id="mbrsel">
<div id="textfilter"><span class="pre"></span><span class="input"><input id="mbrsel-input" type="text" accesskey="/" /></span><span class="post"></span></div>
<div id="order">
<span class="filtertype">Ordering</span>
<ol>
<li class="alpha in"><span>Alphabetic</span></li>
<li class="inherit out"><span>By Inheritance</span></li>
</ol>
</div>
<div id="ancestors">
<span class="filtertype">Inherited<br />
</span>
<ol id="linearization">
<li class="in" name="org.scalatest.events.SeeStackDepthException"><span>SeeStackDepthException</span></li><li class="in" name="org.scalatest.events.Location"><span>Location</span></li><li class="in" name="scala.Serializable"><span>Serializable</span></li><li class="in" name="java.io.Serializable"><span>Serializable</span></li><li class="in" name="scala.Product"><span>Product</span></li><li class="in" name="scala.Equals"><span>Equals</span></li><li class="in" name="scala.AnyRef"><span>AnyRef</span></li><li class="in" name="scala.Any"><span>Any</span></li>
</ol>
</div><div id="ancestors">
<span class="filtertype"></span>
<ol>
<li class="hideall out"><span>Hide All</span></li>
<li class="showall in"><span>Show All</span></li>
</ol>
</div>
<div id="visbl">
<span class="filtertype">Visibility</span>
<ol><li class="public in"><span>Public</span></li><li class="all out"><span>All</span></li></ol>
</div>
</div>
<div id="template">
<div id="allMembers">
<div id="values" class="values members">
<h3>Value Members</h3>
<ol><li name="scala.AnyRef#!=" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="!=(x$1:Any):Boolean"></a>
<a id="!=(Any):Boolean"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier">final </span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span title="gt4s: $bang$eq" class="name">!=</span><span class="params">(<span name="arg0">arg0: <span class="extype" name="scala.Any">Any</span></span>)</span><span class="result">: <span class="extype" name="scala.Boolean">Boolean</span></span>
</span>
</h4><span class="permalink">
<a href="../../../index.html#org.scalatest.events.SeeStackDepthException$@!=(x$1:Any):Boolean" title="Permalink" target="_top">
<img src="../../../lib/permalink.png" alt="Permalink" />
</a>
</span>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef → Any</dd></dl></div>
</li><li name="scala.AnyRef###" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="##():Int"></a>
<a id="##():Int"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier">final </span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span title="gt4s: $hash$hash" class="name">##</span><span class="params">()</span><span class="result">: <span class="extype" name="scala.Int">Int</span></span>
</span>
</h4><span class="permalink">
<a href="../../../index.html#org.scalatest.events.SeeStackDepthException$@##():Int" title="Permalink" target="_top">
<img src="../../../lib/permalink.png" alt="Permalink" />
</a>
</span>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef → Any</dd></dl></div>
</li><li name="scala.AnyRef#==" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="==(x$1:Any):Boolean"></a>
<a id="==(Any):Boolean"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier">final </span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span title="gt4s: $eq$eq" class="name">==</span><span class="params">(<span name="arg0">arg0: <span class="extype" name="scala.Any">Any</span></span>)</span><span class="result">: <span class="extype" name="scala.Boolean">Boolean</span></span>
</span>
</h4><span class="permalink">
<a href="../../../index.html#org.scalatest.events.SeeStackDepthException$@==(x$1:Any):Boolean" title="Permalink" target="_top">
<img src="../../../lib/permalink.png" alt="Permalink" />
</a>
</span>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef → Any</dd></dl></div>
</li><li name="scala.Any#asInstanceOf" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="asInstanceOf[T0]:T0"></a>
<a id="asInstanceOf[T0]:T0"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier">final </span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">asInstanceOf</span><span class="tparams">[<span name="T0">T0</span>]</span><span class="result">: <span class="extype" name="scala.Any.asInstanceOf.T0">T0</span></span>
</span>
</h4><span class="permalink">
<a href="../../../index.html#org.scalatest.events.SeeStackDepthException$@asInstanceOf[T0]:T0" title="Permalink" target="_top">
<img src="../../../lib/permalink.png" alt="Permalink" />
</a>
</span>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>Any</dd></dl></div>
</li><li name="scala.AnyRef#clone" visbl="prt" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="clone():Object"></a>
<a id="clone():AnyRef"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier"></span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">clone</span><span class="params">()</span><span class="result">: <span class="extype" name="scala.AnyRef">AnyRef</span></span>
</span>
</h4><span class="permalink">
<a href="../../../index.html#org.scalatest.events.SeeStackDepthException$@clone():Object" title="Permalink" target="_top">
<img src="../../../lib/permalink.png" alt="Permalink" />
</a>
</span>
<div class="fullcomment"><dl class="attributes block"> <dt>Attributes</dt><dd>protected[<a href="../../../java$lang.html" class="extype" name="java.lang">java.lang</a>] </dd><dt>Definition Classes</dt><dd>AnyRef</dd><dt>Annotations</dt><dd>
<span class="name">@throws</span><span class="args">(<span>
<span class="defval" name="classOf[java.lang.CloneNotSupportedException]">...</span>
</span>)</span>
</dd></dl></div>
</li><li name="scala.AnyRef#eq" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="eq(x$1:AnyRef):Boolean"></a>
<a id="eq(AnyRef):Boolean"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier">final </span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">eq</span><span class="params">(<span name="arg0">arg0: <span class="extype" name="scala.AnyRef">AnyRef</span></span>)</span><span class="result">: <span class="extype" name="scala.Boolean">Boolean</span></span>
</span>
</h4><span class="permalink">
<a href="../../../index.html#org.scalatest.events.SeeStackDepthException$@eq(x$1:AnyRef):Boolean" title="Permalink" target="_top">
<img src="../../../lib/permalink.png" alt="Permalink" />
</a>
</span>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef</dd></dl></div>
</li><li name="scala.AnyRef#equals" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="equals(x$1:Any):Boolean"></a>
<a id="equals(Any):Boolean"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier"></span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">equals</span><span class="params">(<span name="arg0">arg0: <span class="extype" name="scala.Any">Any</span></span>)</span><span class="result">: <span class="extype" name="scala.Boolean">Boolean</span></span>
</span>
</h4><span class="permalink">
<a href="../../../index.html#org.scalatest.events.SeeStackDepthException$@equals(x$1:Any):Boolean" title="Permalink" target="_top">
<img src="../../../lib/permalink.png" alt="Permalink" />
</a>
</span>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef → Any</dd></dl></div>
</li><li name="scala.AnyRef#finalize" visbl="prt" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="finalize():Unit"></a>
<a id="finalize():Unit"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier"></span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">finalize</span><span class="params">()</span><span class="result">: <span class="extype" name="scala.Unit">Unit</span></span>
</span>
</h4><span class="permalink">
<a href="../../../index.html#org.scalatest.events.SeeStackDepthException$@finalize():Unit" title="Permalink" target="_top">
<img src="../../../lib/permalink.png" alt="Permalink" />
</a>
</span>
<div class="fullcomment"><dl class="attributes block"> <dt>Attributes</dt><dd>protected[<a href="../../../java$lang.html" class="extype" name="java.lang">java.lang</a>] </dd><dt>Definition Classes</dt><dd>AnyRef</dd><dt>Annotations</dt><dd>
<span class="name">@throws</span><span class="args">(<span>
<span class="symbol">classOf[java.lang.Throwable]</span>
</span>)</span>
</dd></dl></div>
</li><li name="scala.AnyRef#getClass" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="getClass():Class[_]"></a>
<a id="getClass():Class[_]"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier">final </span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">getClass</span><span class="params">()</span><span class="result">: <span class="extype" name="java.lang.Class">Class</span>[_]</span>
</span>
</h4><span class="permalink">
<a href="../../../index.html#org.scalatest.events.SeeStackDepthException$@getClass():Class[_]" title="Permalink" target="_top">
<img src="../../../lib/permalink.png" alt="Permalink" />
</a>
</span>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef → Any</dd></dl></div>
</li><li name="scala.Any#isInstanceOf" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="isInstanceOf[T0]:Boolean"></a>
<a id="isInstanceOf[T0]:Boolean"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier">final </span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">isInstanceOf</span><span class="tparams">[<span name="T0">T0</span>]</span><span class="result">: <span class="extype" name="scala.Boolean">Boolean</span></span>
</span>
</h4><span class="permalink">
<a href="../../../index.html#org.scalatest.events.SeeStackDepthException$@isInstanceOf[T0]:Boolean" title="Permalink" target="_top">
<img src="../../../lib/permalink.png" alt="Permalink" />
</a>
</span>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>Any</dd></dl></div>
</li><li name="scala.AnyRef#ne" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="ne(x$1:AnyRef):Boolean"></a>
<a id="ne(AnyRef):Boolean"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier">final </span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">ne</span><span class="params">(<span name="arg0">arg0: <span class="extype" name="scala.AnyRef">AnyRef</span></span>)</span><span class="result">: <span class="extype" name="scala.Boolean">Boolean</span></span>
</span>
</h4><span class="permalink">
<a href="../../../index.html#org.scalatest.events.SeeStackDepthException$@ne(x$1:AnyRef):Boolean" title="Permalink" target="_top">
<img src="../../../lib/permalink.png" alt="Permalink" />
</a>
</span>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef</dd></dl></div>
</li><li name="scala.AnyRef#notify" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="notify():Unit"></a>
<a id="notify():Unit"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier">final </span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">notify</span><span class="params">()</span><span class="result">: <span class="extype" name="scala.Unit">Unit</span></span>
</span>
</h4><span class="permalink">
<a href="../../../index.html#org.scalatest.events.SeeStackDepthException$@notify():Unit" title="Permalink" target="_top">
<img src="../../../lib/permalink.png" alt="Permalink" />
</a>
</span>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef</dd></dl></div>
</li><li name="scala.AnyRef#notifyAll" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="notifyAll():Unit"></a>
<a id="notifyAll():Unit"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier">final </span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">notifyAll</span><span class="params">()</span><span class="result">: <span class="extype" name="scala.Unit">Unit</span></span>
</span>
</h4><span class="permalink">
<a href="../../../index.html#org.scalatest.events.SeeStackDepthException$@notifyAll():Unit" title="Permalink" target="_top">
<img src="../../../lib/permalink.png" alt="Permalink" />
</a>
</span>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef</dd></dl></div>
</li><li name="scala.AnyRef#synchronized" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="synchronized[T0](x$1:=>T0):T0"></a>
<a id="synchronized[T0](⇒T0):T0"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier">final </span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">synchronized</span><span class="tparams">[<span name="T0">T0</span>]</span><span class="params">(<span name="arg0">arg0: ⇒ <span class="extype" name="java.lang.AnyRef.synchronized.T0">T0</span></span>)</span><span class="result">: <span class="extype" name="java.lang.AnyRef.synchronized.T0">T0</span></span>
</span>
</h4><span class="permalink">
<a href="../../../index.html#org.scalatest.events.SeeStackDepthException$@synchronized[T0](x$1:=>T0):T0" title="Permalink" target="_top">
<img src="../../../lib/permalink.png" alt="Permalink" />
</a>
</span>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef</dd></dl></div>
</li><li name="scala.AnyRef#wait" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="wait():Unit"></a>
<a id="wait():Unit"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier">final </span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">wait</span><span class="params">()</span><span class="result">: <span class="extype" name="scala.Unit">Unit</span></span>
</span>
</h4><span class="permalink">
<a href="../../../index.html#org.scalatest.events.SeeStackDepthException$@wait():Unit" title="Permalink" target="_top">
<img src="../../../lib/permalink.png" alt="Permalink" />
</a>
</span>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef</dd><dt>Annotations</dt><dd>
<span class="name">@throws</span><span class="args">(<span>
<span class="defval" name="classOf[java.lang.InterruptedException]">...</span>
</span>)</span>
</dd></dl></div>
</li><li name="scala.AnyRef#wait" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="wait(x$1:Long,x$2:Int):Unit"></a>
<a id="wait(Long,Int):Unit"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier">final </span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">wait</span><span class="params">(<span name="arg0">arg0: <span class="extype" name="scala.Long">Long</span></span>, <span name="arg1">arg1: <span class="extype" name="scala.Int">Int</span></span>)</span><span class="result">: <span class="extype" name="scala.Unit">Unit</span></span>
</span>
</h4><span class="permalink">
<a href="../../../index.html#org.scalatest.events.SeeStackDepthException$@wait(x$1:Long,x$2:Int):Unit" title="Permalink" target="_top">
<img src="../../../lib/permalink.png" alt="Permalink" />
</a>
</span>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef</dd><dt>Annotations</dt><dd>
<span class="name">@throws</span><span class="args">(<span>
<span class="defval" name="classOf[java.lang.InterruptedException]">...</span>
</span>)</span>
</dd></dl></div>
</li><li name="scala.AnyRef#wait" visbl="pub" data-isabs="false" fullComment="yes" group="Ungrouped">
<a id="wait(x$1:Long):Unit"></a>
<a id="wait(Long):Unit"></a>
<h4 class="signature">
<span class="modifier_kind">
<span class="modifier">final </span>
<span class="kind">def</span>
</span>
<span class="symbol">
<span class="name">wait</span><span class="params">(<span name="arg0">arg0: <span class="extype" name="scala.Long">Long</span></span>)</span><span class="result">: <span class="extype" name="scala.Unit">Unit</span></span>
</span>
</h4><span class="permalink">
<a href="../../../index.html#org.scalatest.events.SeeStackDepthException$@wait(x$1:Long):Unit" title="Permalink" target="_top">
<img src="../../../lib/permalink.png" alt="Permalink" />
</a>
</span>
<div class="fullcomment"><dl class="attributes block"> <dt>Definition Classes</dt><dd>AnyRef</dd><dt>Annotations</dt><dd>
<span class="name">@throws</span><span class="args">(<span>
<span class="defval" name="classOf[java.lang.InterruptedException]">...</span>
</span>)</span>
</dd></dl></div>
</li></ol>
</div>
</div>
<div id="inheritedMembers">
<div class="parent" name="org.scalatest.events.Location">
<h3>Inherited from <a href="Location.html" class="extype" name="org.scalatest.events.Location">Location</a></h3>
</div><div class="parent" name="scala.Serializable">
<h3>Inherited from <span class="extype" name="scala.Serializable">Serializable</span></h3>
</div><div class="parent" name="java.io.Serializable">
<h3>Inherited from <span class="extype" name="java.io.Serializable">Serializable</span></h3>
</div><div class="parent" name="scala.Product">
<h3>Inherited from <span class="extype" name="scala.Product">Product</span></h3>
</div><div class="parent" name="scala.Equals">
<h3>Inherited from <span class="extype" name="scala.Equals">Equals</span></h3>
</div><div class="parent" name="scala.AnyRef">
<h3>Inherited from <span class="extype" name="scala.AnyRef">AnyRef</span></h3>
</div><div class="parent" name="scala.Any">
<h3>Inherited from <span class="extype" name="scala.Any">Any</span></h3>
</div>
</div>
<div id="groupedMembers">
<div class="group" name="Ungrouped">
<h3>Ungrouped</h3>
</div>
</div>
</div>
<div id="tooltip"></div>
<div id="footer"> </div>
</body>
</html>
| scalatest/scalatest-website | public/scaladoc/3.0.2/org/scalatest/events/SeeStackDepthException$.html | HTML | apache-2.0 | 25,954 |
# Copyright 2013 Donald Stufft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import hashlib
import os
from elasticsearch import Elasticsearch, TransportError
from elasticsearch.helpers import bulk_index
from warehouse.utils import AttributeDict
class Index(object):
_index = "warehouse"
def __init__(self, models, config):
self.models = models
self.config = config
self.es = Elasticsearch(
hosts=self.config.hosts,
**self.config.get("client_options", {})
)
self.types = AttributeDict()
def register(self, type_):
obj = type_(self)
self.types[obj._type] = obj
def reindex(self, index=None, alias=True, keep_old=False):
# Generate an Index Name for Warehouse
index = "".join([
index if index is not None else self._index,
hashlib.md5(os.urandom(16)).hexdigest()[:8],
])
# Create this index
self.es.indices.create(index, {
"mappings": {
doc_type._type: doc_type.get_mapping()
for doc_type in self.types.values()
},
})
# Index everything into the new index
for doc_type in self.types.values():
doc_type.index_all(index=index)
# Update the alias unless we've been told not to
if alias:
self.update_alias(self._index, index, keep_old=keep_old)
def update_alias(self, alias, index, keep_old=False):
# Get the old index from ElasticSearch
try:
            old_index = list(self.es.indices.get_alias(alias).keys())[0]
except TransportError as exc:
if not exc.status_code == 404:
raise
old_index = None
# Remove the alias to the old index if it exists
if old_index is not None:
actions = [{"remove": {"index": old_index, "alias": alias}}]
else:
actions = []
# Add the alias to the new index
actions += [{"add": {"index": index, "alias": alias}}]
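        # At this point ``actions`` contains the full alias swap, e.g.:
        #   [{"remove": {"index": "warehouse1a2b3c4d", "alias": "warehouse"}},
        #    {"add": {"index": "warehouse5e6f7a8b", "alias": "warehouse"}}]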
        # Update the aliases so they point at the new index
self.es.indices.update_aliases({"actions": actions})
# Delete the old index if it exists and unless we're keeping it
if not keep_old and old_index is not None:
self.es.indices.delete(old_index)
class BaseMapping(object):
SEARCH_LIMIT = 25
def __init__(self, index):
self.index = index
def get_mapping(self):
raise NotImplementedError
def get_indexable(self):
raise NotImplementedError
def extract_id(self, item):
raise NotImplementedError
def extract_document(self, item):
raise NotImplementedError
def index_all(self, index=None):
# Determine which index we are indexing into
_index = index if index is not None else self.index._index
# Bulk Index our documents
bulk_index(
self.index.es,
[
{
"_index": _index,
"_type": self._type,
"_id": self.extract_id(item),
"_source": self.extract_document(item),
}
for item in self.get_indexable()
],
)
def search(self, query):
raise NotImplementedError
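# A minimal usage sketch (``ProjectMapping`` and the ``models``/``config``
# objects below are hypothetical and shown only for illustration):
#
#   index = Index(models, AttributeDict({"hosts": ["http://localhost:9200"]}))
#   index.register(ProjectMapping)  # a BaseMapping subclass defining _type etc.
#   index.reindex()                 # build a fresh index, then swap the alias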
| mattrobenolt/warehouse | warehouse/search/indexes.py | Python | apache-2.0 | 3,926 |
/*
* Licensed to Elasticsearch under one or more contributor
* license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright
* ownership. Elasticsearch licenses this file to you under
* the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.elasticsearch.action.support.master;
import org.elasticsearch.Version;
import org.elasticsearch.action.ActionFuture;
import org.elasticsearch.action.ActionListener;
import org.elasticsearch.action.ActionRequestValidationException;
import org.elasticsearch.action.ActionResponse;
import org.elasticsearch.action.support.ActionFilters;
import org.elasticsearch.action.support.PlainActionFuture;
import org.elasticsearch.action.support.ThreadedActionListener;
import org.elasticsearch.action.support.replication.ClusterStateCreationUtils;
import org.elasticsearch.cluster.ClusterState;
import org.elasticsearch.cluster.NotMasterException;
import org.elasticsearch.cluster.block.ClusterBlock;
import org.elasticsearch.cluster.block.ClusterBlockException;
import org.elasticsearch.cluster.block.ClusterBlockLevel;
import org.elasticsearch.cluster.block.ClusterBlocks;
import org.elasticsearch.cluster.metadata.IndexNameExpressionResolver;
import org.elasticsearch.cluster.node.DiscoveryNode;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.DummyTransportAddress;
import org.elasticsearch.common.unit.TimeValue;
import org.elasticsearch.discovery.Discovery;
import org.elasticsearch.discovery.MasterNotDiscoveredException;
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.tasks.Task;
import org.elasticsearch.test.ESTestCase;
import org.elasticsearch.test.transport.CapturingTransport;
import org.elasticsearch.threadpool.TestThreadPool;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.ConnectTransportException;
import org.elasticsearch.transport.TransportService;
import org.junit.After;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import static org.elasticsearch.test.ClusterServiceUtils.createClusterService;
import static org.elasticsearch.test.ClusterServiceUtils.setState;
import static org.hamcrest.Matchers.equalTo;
import static org.hamcrest.Matchers.instanceOf;
public class TransportMasterNodeActionTests extends ESTestCase {
private static ThreadPool threadPool;
private ClusterService clusterService;
private TransportService transportService;
private CapturingTransport transport;
private DiscoveryNode localNode;
private DiscoveryNode remoteNode;
private DiscoveryNode[] allNodes;
@BeforeClass
public static void beforeClass() {
threadPool = new TestThreadPool("TransportMasterNodeActionTests");
}
@Override
@Before
public void setUp() throws Exception {
super.setUp();
transport = new CapturingTransport();
clusterService = createClusterService(threadPool);
transportService = new TransportService(clusterService.getSettings(), transport, threadPool);
transportService.start();
transportService.acceptIncomingRequests();
localNode = new DiscoveryNode("local_node", DummyTransportAddress.INSTANCE, Collections.emptyMap(),
Collections.singleton(DiscoveryNode.Role.MASTER), Version.CURRENT);
remoteNode = new DiscoveryNode("remote_node", DummyTransportAddress.INSTANCE, Collections.emptyMap(),
Collections.singleton(DiscoveryNode.Role.MASTER), Version.CURRENT);
allNodes = new DiscoveryNode[]{localNode, remoteNode};
}
@After
public void tearDown() throws Exception {
super.tearDown();
clusterService.close();
transportService.close();
}
@AfterClass
public static void afterClass() {
ThreadPool.terminate(threadPool, 30, TimeUnit.SECONDS);
threadPool = null;
}
void assertListenerThrows(String msg, ActionFuture<?> listener, Class<?> klass) throws InterruptedException {
try {
listener.get();
fail(msg);
} catch (ExecutionException ex) {
assertThat(ex.getCause(), instanceOf(klass));
}
}
public static class Request extends MasterNodeRequest<Request> {
@Override
public ActionRequestValidationException validate() {
return null;
}
}
class Response extends ActionResponse {}
class Action extends TransportMasterNodeAction<Request, Response> {
Action(Settings settings, String actionName, TransportService transportService, ClusterService clusterService, ThreadPool threadPool) {
super(settings, actionName, transportService, clusterService, threadPool,
new ActionFilters(new HashSet<>()), new IndexNameExpressionResolver(Settings.EMPTY), Request::new);
}
@Override
protected void doExecute(Task task, final Request request, ActionListener<Response> listener) {
// remove unneeded threading by wrapping listener with SAME to prevent super.doExecute from wrapping it with LISTENER
super.doExecute(task, request, new ThreadedActionListener<>(logger, threadPool, ThreadPool.Names.SAME, listener));
}
@Override
protected String executor() {
// very lightweight operation in memory, no need to fork to a thread
return ThreadPool.Names.SAME;
}
@Override
protected Response newResponse() {
return new Response();
}
@Override
protected void masterOperation(Request request, ClusterState state, ActionListener<Response> listener) throws Exception {
listener.onResponse(new Response()); // default implementation, overridden in specific tests
}
@Override
protected ClusterBlockException checkBlock(Request request, ClusterState state) {
return null; // default implementation, overridden in specific tests
}
}
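    // The tests below share a pattern: seed the cluster state via setState(...),
    // run the Action with a PlainActionFuture listener, then assert on the
    // listener and on any requests captured by the CapturingTransport.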
public void testLocalOperationWithoutBlocks() throws ExecutionException, InterruptedException {
final boolean masterOperationFailure = randomBoolean();
Request request = new Request();
PlainActionFuture<Response> listener = new PlainActionFuture<>();
final Throwable exception = new Throwable();
final Response response = new Response();
setState(clusterService, ClusterStateCreationUtils.state(localNode, localNode, allNodes));
new Action(Settings.EMPTY, "testAction", transportService, clusterService, threadPool) {
@Override
protected void masterOperation(Task task, Request request, ClusterState state, ActionListener<Response> listener) throws Exception {
if (masterOperationFailure) {
listener.onFailure(exception);
} else {
listener.onResponse(response);
}
}
}.execute(request, listener);
assertTrue(listener.isDone());
if (masterOperationFailure) {
try {
listener.get();
fail("Expected exception but returned proper result");
} catch (ExecutionException ex) {
assertThat(ex.getCause(), equalTo(exception));
}
} else {
assertThat(listener.get(), equalTo(response));
}
}
public void testLocalOperationWithBlocks() throws ExecutionException, InterruptedException {
final boolean retryableBlock = randomBoolean();
final boolean unblockBeforeTimeout = randomBoolean();
Request request = new Request().masterNodeTimeout(TimeValue.timeValueSeconds(unblockBeforeTimeout ? 60 : 0));
PlainActionFuture<Response> listener = new PlainActionFuture<>();
ClusterBlock block = new ClusterBlock(1, "", retryableBlock, true,
randomFrom(RestStatus.values()), ClusterBlockLevel.ALL);
ClusterState stateWithBlock = ClusterState.builder(ClusterStateCreationUtils.state(localNode, localNode, allNodes))
.blocks(ClusterBlocks.builder().addGlobalBlock(block)).build();
setState(clusterService, stateWithBlock);
new Action(Settings.EMPTY, "testAction", transportService, clusterService, threadPool) {
@Override
protected ClusterBlockException checkBlock(Request request, ClusterState state) {
Set<ClusterBlock> blocks = state.blocks().global();
return blocks.isEmpty() ? null : new ClusterBlockException(blocks);
}
}.execute(request, listener);
if (retryableBlock && unblockBeforeTimeout) {
assertFalse(listener.isDone());
setState(clusterService, ClusterState.builder(ClusterStateCreationUtils.state(localNode, localNode, allNodes))
.blocks(ClusterBlocks.EMPTY_CLUSTER_BLOCK).build());
assertTrue(listener.isDone());
listener.get();
return;
}
assertTrue(listener.isDone());
if (retryableBlock) {
try {
listener.get();
fail("Expected exception but returned proper result");
} catch (ExecutionException ex) {
assertThat(ex.getCause(), instanceOf(MasterNotDiscoveredException.class));
assertThat(ex.getCause().getCause(), instanceOf(ClusterBlockException.class));
}
} else {
assertListenerThrows("ClusterBlockException should be thrown", listener, ClusterBlockException.class);
}
}
public void testForceLocalOperation() throws ExecutionException, InterruptedException {
Request request = new Request();
PlainActionFuture<Response> listener = new PlainActionFuture<>();
setState(clusterService, ClusterStateCreationUtils.state(localNode, randomFrom(null, localNode, remoteNode), allNodes));
new Action(Settings.EMPTY, "testAction", transportService, clusterService, threadPool) {
@Override
protected boolean localExecute(Request request) {
return true;
}
}.execute(request, listener);
assertTrue(listener.isDone());
listener.get();
}
public void testMasterNotAvailable() throws ExecutionException, InterruptedException {
Request request = new Request().masterNodeTimeout(TimeValue.timeValueSeconds(0));
setState(clusterService, ClusterStateCreationUtils.state(localNode, null, allNodes));
PlainActionFuture<Response> listener = new PlainActionFuture<>();
new Action(Settings.EMPTY, "testAction", transportService, clusterService, threadPool).execute(request, listener);
assertTrue(listener.isDone());
assertListenerThrows("MasterNotDiscoveredException should be thrown", listener, MasterNotDiscoveredException.class);
}
public void testMasterBecomesAvailable() throws ExecutionException, InterruptedException {
Request request = new Request();
setState(clusterService, ClusterStateCreationUtils.state(localNode, null, allNodes));
PlainActionFuture<Response> listener = new PlainActionFuture<>();
new Action(Settings.EMPTY, "testAction", transportService, clusterService, threadPool).execute(request, listener);
assertFalse(listener.isDone());
setState(clusterService, ClusterStateCreationUtils.state(localNode, localNode, allNodes));
assertTrue(listener.isDone());
listener.get();
}
public void testDelegateToMaster() throws ExecutionException, InterruptedException {
Request request = new Request();
setState(clusterService, ClusterStateCreationUtils.state(localNode, remoteNode, allNodes));
PlainActionFuture<Response> listener = new PlainActionFuture<>();
new Action(Settings.EMPTY, "testAction", transportService, clusterService, threadPool).execute(request, listener);
assertThat(transport.capturedRequests().length, equalTo(1));
CapturingTransport.CapturedRequest capturedRequest = transport.capturedRequests()[0];
assertTrue(capturedRequest.node.isMasterNode());
assertThat(capturedRequest.request, equalTo(request));
assertThat(capturedRequest.action, equalTo("testAction"));
Response response = new Response();
transport.handleResponse(capturedRequest.requestId, response);
assertTrue(listener.isDone());
assertThat(listener.get(), equalTo(response));
}
public void testDelegateToFailingMaster() throws ExecutionException, InterruptedException {
boolean failsWithConnectTransportException = randomBoolean();
Request request = new Request().masterNodeTimeout(TimeValue.timeValueSeconds(failsWithConnectTransportException ? 60 : 0));
setState(clusterService, ClusterStateCreationUtils.state(localNode, remoteNode, allNodes));
PlainActionFuture<Response> listener = new PlainActionFuture<>();
new Action(Settings.EMPTY, "testAction", transportService, clusterService, threadPool).execute(request, listener);
assertThat(transport.capturedRequests().length, equalTo(1));
CapturingTransport.CapturedRequest capturedRequest = transport.capturedRequests()[0];
assertTrue(capturedRequest.node.isMasterNode());
assertThat(capturedRequest.request, equalTo(request));
assertThat(capturedRequest.action, equalTo("testAction"));
if (failsWithConnectTransportException) {
transport.handleRemoteError(capturedRequest.requestId, new ConnectTransportException(remoteNode, "Fake error"));
assertFalse(listener.isDone());
setState(clusterService, ClusterStateCreationUtils.state(localNode, localNode, allNodes));
assertTrue(listener.isDone());
listener.get();
} else {
Throwable t = new Throwable();
transport.handleRemoteError(capturedRequest.requestId, t);
assertTrue(listener.isDone());
try {
listener.get();
fail("Expected exception but returned proper result");
} catch (ExecutionException ex) {
assertThat(ex.getCause().getCause(), equalTo(t));
}
}
}
public void testMasterFailoverAfterStepDown() throws ExecutionException, InterruptedException {
Request request = new Request().masterNodeTimeout(TimeValue.timeValueHours(1));
PlainActionFuture<Response> listener = new PlainActionFuture<>();
final Response response = new Response();
setState(clusterService, ClusterStateCreationUtils.state(localNode, localNode, allNodes));
new Action(Settings.EMPTY, "testAction", transportService, clusterService, threadPool) {
@Override
protected void masterOperation(Request request, ClusterState state, ActionListener<Response> listener) throws Exception {
// The other node has become master, simulate failures of this node while publishing cluster state through ZenDiscovery
setState(clusterService, ClusterStateCreationUtils.state(localNode, remoteNode, allNodes));
Throwable failure = randomBoolean()
? new Discovery.FailedToCommitClusterStateException("Fake error")
: new NotMasterException("Fake error");
listener.onFailure(failure);
}
}.execute(request, listener);
assertThat(transport.capturedRequests().length, equalTo(1));
CapturingTransport.CapturedRequest capturedRequest = transport.capturedRequests()[0];
assertTrue(capturedRequest.node.isMasterNode());
assertThat(capturedRequest.request, equalTo(request));
assertThat(capturedRequest.action, equalTo("testAction"));
transport.handleResponse(capturedRequest.requestId, response);
assertTrue(listener.isDone());
assertThat(listener.get(), equalTo(response));
}
}
| palecur/elasticsearch | core/src/test/java/org/elasticsearch/action/support/master/TransportMasterNodeActionTests.java | Java | apache-2.0 | 16,875 |
/*
* Copyright 2000-2016 JetBrains s.r.o.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intellij.ui.components;
import com.intellij.openapi.diagnostic.Logger;
import com.intellij.openapi.util.Key;
import com.intellij.openapi.util.SystemInfo;
import com.intellij.openapi.util.registry.Registry;
import com.intellij.openapi.wm.IdeGlassPane;
import com.intellij.ui.IdeBorderFactory;
import com.intellij.util.ArrayUtil;
import com.intellij.util.ReflectionUtil;
import com.intellij.util.ui.ButtonlessScrollBarUI;
import com.intellij.util.ui.JBInsets;
import com.intellij.util.ui.RegionPainter;
import com.intellij.util.ui.UIUtil;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import javax.swing.*;
import javax.swing.border.Border;
import javax.swing.border.LineBorder;
import javax.swing.plaf.ScrollBarUI;
import javax.swing.plaf.ScrollPaneUI;
import javax.swing.plaf.UIResource;
import javax.swing.plaf.basic.BasicScrollBarUI;
import javax.swing.plaf.basic.BasicScrollPaneUI;
import java.awt.*;
import java.awt.event.InputEvent;
import java.awt.event.MouseEvent;
import java.awt.event.MouseWheelEvent;
import java.awt.event.MouseWheelListener;
import java.lang.reflect.Field;
public class JBScrollPane extends JScrollPane {
/**
   * This key is used to specify which colors the scroll bars on the pane should use.
   * If this client property is set to {@code true}, the bar's brightness
   * will be modified according to the view's background.
*
* @see UIUtil#putClientProperty
* @see UIUtil#isUnderDarcula
*/
public static final Key<Boolean> BRIGHTNESS_FROM_VIEW = Key.create("JB_SCROLL_PANE_BRIGHTNESS_FROM_VIEW");
@Deprecated
public static final RegionPainter<Float> THUMB_PAINTER = ScrollPainter.EditorThumb.DEFAULT;
@Deprecated
public static final RegionPainter<Float> THUMB_DARK_PAINTER = ScrollPainter.EditorThumb.DARCULA;
@Deprecated
public static final RegionPainter<Float> MAC_THUMB_PAINTER = ScrollPainter.EditorThumb.Mac.DEFAULT;
@Deprecated
public static final RegionPainter<Float> MAC_THUMB_DARK_PAINTER = ScrollPainter.EditorThumb.Mac.DARCULA;
private static final Logger LOG = Logger.getInstance(JBScrollPane.class);
private int myViewportBorderWidth = -1;
private boolean myHasOverlayScrollbars;
private volatile boolean myBackgroundRequested; // avoid cyclic references
public JBScrollPane(int viewportWidth) {
init(false);
myViewportBorderWidth = viewportWidth;
updateViewportBorder();
}
public JBScrollPane() {
init();
}
public JBScrollPane(Component view) {
super(view);
init();
}
public JBScrollPane(int vsbPolicy, int hsbPolicy) {
super(vsbPolicy, hsbPolicy);
init();
}
public JBScrollPane(Component view, int vsbPolicy, int hsbPolicy) {
super(view, vsbPolicy, hsbPolicy);
init();
}
@Override
public Color getBackground() {
Color color = super.getBackground();
if (!myBackgroundRequested && EventQueue.isDispatchThread() && Registry.is("ide.scroll.background.auto")) {
if (!isBackgroundSet() || color instanceof UIResource) {
Component child = getViewport();
if (child != null) {
try {
myBackgroundRequested = true;
return child.getBackground();
}
finally {
myBackgroundRequested = false;
}
}
}
}
return color;
}
static Color getViewBackground(JScrollPane pane) {
if (pane == null) return null;
JViewport viewport = pane.getViewport();
if (viewport == null) return null;
Component view = viewport.getView();
if (view == null) return null;
return view.getBackground();
}
public static JScrollPane findScrollPane(Component c) {
if (c == null) return null;
if (!(c instanceof JViewport)) {
Container vp = c.getParent();
if (vp instanceof JViewport) c = vp;
}
c = c.getParent();
if (!(c instanceof JScrollPane)) return null;
return (JScrollPane)c;
}
private void init() {
init(true);
}
private void init(boolean setupCorners) {
setLayout(Registry.is("ide.scroll.new.layout") ? new Layout() : new ScrollPaneLayout());
if (setupCorners) {
setupCorners();
}
}
protected void setupCorners() {
setBorder(IdeBorderFactory.createBorder());
setCorner(UPPER_RIGHT_CORNER, new Corner(UPPER_RIGHT_CORNER));
setCorner(UPPER_LEFT_CORNER, new Corner(UPPER_LEFT_CORNER));
setCorner(LOWER_RIGHT_CORNER, new Corner(LOWER_RIGHT_CORNER));
setCorner(LOWER_LEFT_CORNER, new Corner(LOWER_LEFT_CORNER));
}
@Override
public void setUI(ScrollPaneUI ui) {
super.setUI(ui);
updateViewportBorder();
if (ui instanceof BasicScrollPaneUI) {
try {
Field field = BasicScrollPaneUI.class.getDeclaredField("mouseScrollListener");
field.setAccessible(true);
Object value = field.get(ui);
if (value instanceof MouseWheelListener) {
MouseWheelListener oldListener = (MouseWheelListener)value;
MouseWheelListener newListener = event -> {
if (isScrollEvent(event)) {
Object source = event.getSource();
if (source instanceof JScrollPane) {
JScrollPane pane = (JScrollPane)source;
if (pane.isWheelScrollingEnabled()) {
JScrollBar bar = event.isShiftDown() ? pane.getHorizontalScrollBar() : pane.getVerticalScrollBar();
if (bar != null && bar.isVisible()) oldListener.mouseWheelMoved(event);
}
}
}
};
field.set(ui, newListener);
// replace listener if field updated successfully
removeMouseWheelListener(oldListener);
addMouseWheelListener(newListener);
}
}
catch (Exception exception) {
LOG.warn(exception);
}
}
}
@Override
public boolean isOptimizedDrawingEnabled() {
if (getLayout() instanceof Layout) {
return isOptimizedDrawingEnabledFor(getVerticalScrollBar()) &&
isOptimizedDrawingEnabledFor(getHorizontalScrollBar());
}
return !myHasOverlayScrollbars;
}
/**
* Returns {@code false} for visible translucent scroll bars, or {@code true} otherwise.
   * This is needed so that translucent scroll bars are repainted whenever the viewport repaints.
*/
private static boolean isOptimizedDrawingEnabledFor(JScrollBar bar) {
return bar == null || bar.isOpaque() || !bar.isVisible();
}
private void updateViewportBorder() {
if (getViewportBorder() instanceof ViewportBorder) {
setViewportBorder(new ViewportBorder(myViewportBorderWidth >= 0 ? myViewportBorderWidth : 1));
}
}
public static ViewportBorder createIndentBorder() {
return new ViewportBorder(2);
}
@Override
public JScrollBar createVerticalScrollBar() {
return new MyScrollBar(Adjustable.VERTICAL);
}
@NotNull
@Override
public JScrollBar createHorizontalScrollBar() {
return new MyScrollBar(Adjustable.HORIZONTAL);
}
@Override
protected JViewport createViewport() {
return new JBViewport();
}
@SuppressWarnings("deprecation")
@Override
public void layout() {
LayoutManager layout = getLayout();
ScrollPaneLayout scrollLayout = layout instanceof ScrollPaneLayout ? (ScrollPaneLayout)layout : null;
// Now we let JScrollPane layout everything as necessary
super.layout();
if (layout instanceof Layout) return;
if (scrollLayout != null) {
// Now it's time to jump in and expand the viewport so it fits the whole area
// (taking into consideration corners, headers and other stuff).
myHasOverlayScrollbars = relayoutScrollbars(
this, scrollLayout,
myHasOverlayScrollbars // If last time we did relayouting, we should restore it back.
);
}
else {
myHasOverlayScrollbars = false;
}
}
private boolean relayoutScrollbars(@NotNull JComponent container, @NotNull ScrollPaneLayout layout, boolean forceRelayout) {
JViewport viewport = layout.getViewport();
if (viewport == null) return false;
JScrollBar vsb = layout.getVerticalScrollBar();
JScrollBar hsb = layout.getHorizontalScrollBar();
JViewport colHead = layout.getColumnHeader();
JViewport rowHead = layout.getRowHeader();
Rectangle viewportBounds = viewport.getBounds();
boolean extendViewportUnderVScrollbar = vsb != null && shouldExtendViewportUnderScrollbar(vsb);
boolean extendViewportUnderHScrollbar = hsb != null && shouldExtendViewportUnderScrollbar(hsb);
boolean hasOverlayScrollbars = extendViewportUnderVScrollbar || extendViewportUnderHScrollbar;
if (!hasOverlayScrollbars && !forceRelayout) return false;
container.setComponentZOrder(viewport, container.getComponentCount() - 1);
if (vsb != null) container.setComponentZOrder(vsb, 0);
if (hsb != null) container.setComponentZOrder(hsb, 0);
if (extendViewportUnderVScrollbar) {
int x2 = Math.max(vsb.getX() + vsb.getWidth(), viewportBounds.x + viewportBounds.width);
viewportBounds.x = Math.min(viewportBounds.x, vsb.getX());
viewportBounds.width = x2 - viewportBounds.x;
}
if (extendViewportUnderHScrollbar) {
int y2 = Math.max(hsb.getY() + hsb.getHeight(), viewportBounds.y + viewportBounds.height);
viewportBounds.y = Math.min(viewportBounds.y, hsb.getY());
viewportBounds.height = y2 - viewportBounds.y;
}
if (extendViewportUnderVScrollbar) {
if (hsb != null) {
Rectangle scrollbarBounds = hsb.getBounds();
scrollbarBounds.width = viewportBounds.x + viewportBounds.width - scrollbarBounds.x;
hsb.setBounds(scrollbarBounds);
}
if (colHead != null) {
Rectangle headerBounds = colHead.getBounds();
headerBounds.width = viewportBounds.width;
colHead.setBounds(headerBounds);
}
hideFromView(layout.getCorner(UPPER_RIGHT_CORNER));
hideFromView(layout.getCorner(LOWER_RIGHT_CORNER));
}
if (extendViewportUnderHScrollbar) {
if (vsb != null) {
Rectangle scrollbarBounds = vsb.getBounds();
scrollbarBounds.height = viewportBounds.y + viewportBounds.height - scrollbarBounds.y;
vsb.setBounds(scrollbarBounds);
}
if (rowHead != null) {
Rectangle headerBounds = rowHead.getBounds();
headerBounds.height = viewportBounds.height;
rowHead.setBounds(headerBounds);
}
hideFromView(layout.getCorner(LOWER_LEFT_CORNER));
hideFromView(layout.getCorner(LOWER_RIGHT_CORNER));
}
viewport.setBounds(viewportBounds);
return hasOverlayScrollbars;
}
private boolean shouldExtendViewportUnderScrollbar(@Nullable JScrollBar scrollbar) {
if (scrollbar == null || !scrollbar.isVisible()) return false;
return isOverlaidScrollbar(scrollbar);
}
protected boolean isOverlaidScrollbar(@Nullable JScrollBar scrollbar) {
if (!ButtonlessScrollBarUI.isMacOverlayScrollbarSupported()) return false;
ScrollBarUI vsbUI = scrollbar == null ? null : scrollbar.getUI();
return vsbUI instanceof ButtonlessScrollBarUI && !((ButtonlessScrollBarUI)vsbUI).alwaysShowTrack();
}
private static void hideFromView(Component component) {
if (component == null) return;
component.setBounds(-10, -10, 1, 1);
}
private class MyScrollBar extends ScrollBar implements IdeGlassPane.TopComponent {
public MyScrollBar(int orientation) {
super(orientation);
}
@Override
public void updateUI() {
ScrollBarUI ui = getUI();
if (ui instanceof DefaultScrollBarUI) return;
setUI(JBScrollBar.createUI(this));
}
@Override
public boolean canBePreprocessed(MouseEvent e) {
return JBScrollPane.canBePreprocessed(e, this);
}
}
public static boolean canBePreprocessed(MouseEvent e, JScrollBar bar) {
if (e.getID() == MouseEvent.MOUSE_MOVED || e.getID() == MouseEvent.MOUSE_PRESSED) {
ScrollBarUI ui = bar.getUI();
if (ui instanceof BasicScrollBarUI) {
BasicScrollBarUI bui = (BasicScrollBarUI)ui;
try {
Rectangle rect = (Rectangle)ReflectionUtil.getDeclaredMethod(BasicScrollBarUI.class, "getThumbBounds", ArrayUtil.EMPTY_CLASS_ARRAY).invoke(bui);
Point point = SwingUtilities.convertPoint(e.getComponent(), e.getX(), e.getY(), bar);
return !rect.contains(point);
}
catch (Exception e1) {
return true;
}
}
else if (ui instanceof DefaultScrollBarUI) {
DefaultScrollBarUI dui = (DefaultScrollBarUI)ui;
Point point = e.getLocationOnScreen();
SwingUtilities.convertPointFromScreen(point, bar);
return !dui.isThumbContains(point.x, point.y);
}
}
return true;
}
private static class Corner extends JPanel {
private final String myPos;
public Corner(String pos) {
myPos = pos;
ScrollColorProducer.setBackground(this);
ScrollColorProducer.setForeground(this);
}
@Override
protected void paintComponent(Graphics g) {
g.setColor(getBackground());
g.fillRect(0, 0, getWidth(), getHeight());
if (SystemInfo.isMac || !Registry.is("ide.scroll.track.border.paint")) return;
g.setColor(getForeground());
int x2 = getWidth() - 1;
int y2 = getHeight() - 1;
if (myPos == UPPER_LEFT_CORNER || myPos == UPPER_RIGHT_CORNER) {
g.drawLine(0, y2, x2, y2);
}
if (myPos == LOWER_LEFT_CORNER || myPos == LOWER_RIGHT_CORNER) {
g.drawLine(0, 0, x2, 0);
}
if (myPos == UPPER_LEFT_CORNER || myPos == LOWER_LEFT_CORNER) {
g.drawLine(x2, 0, x2, y2);
}
if (myPos == UPPER_RIGHT_CORNER || myPos == LOWER_RIGHT_CORNER) {
g.drawLine(0, 0, 0, y2);
}
}
}
private static class ViewportBorder extends LineBorder {
public ViewportBorder(int thickness) {
super(null, thickness);
}
@Override
public void paintBorder(Component c, Graphics g, int x, int y, int width, int height) {
updateColor(c);
super.paintBorder(c, g, x, y, width, height);
}
private void updateColor(Component c) {
if (!(c instanceof JScrollPane)) return;
lineColor = getViewBackground((JScrollPane)c);
}
}
/**
* These client properties modify a scroll pane layout.
* Use the class object as a property key.
*
* @see #putClientProperty(Object, Object)
*/
public enum Flip {
NONE, VERTICAL, HORIZONTAL, BOTH
}
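
  // A usage sketch: pane.putClientProperty(JBScrollPane.Flip.class, Flip.HORIZONTAL)
  // makes the Layout below place the vertical scroll bar on the opposite side.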
/**
* These client properties show a component position on a scroll pane.
   * They are set by the internal layout manager of the scroll pane.
*/
public enum Alignment {
TOP, LEFT, RIGHT, BOTTOM;
public static Alignment get(JComponent component) {
if (component != null) {
Object property = component.getClientProperty(Alignment.class);
if (property instanceof Alignment) return (Alignment)property;
Container parent = component.getParent();
if (parent instanceof JScrollPane) {
JScrollPane pane = (JScrollPane)parent;
if (component == pane.getColumnHeader()) {
return TOP;
}
if (component == pane.getHorizontalScrollBar()) {
return BOTTOM;
}
boolean ltr = pane.getComponentOrientation().isLeftToRight();
if (component == pane.getVerticalScrollBar()) {
return ltr ? RIGHT : LEFT;
}
if (component == pane.getRowHeader()) {
return ltr ? LEFT : RIGHT;
}
}
// assume alignment for a scroll bar,
// which is not contained in a scroll pane
if (component instanceof JScrollBar) {
JScrollBar bar = (JScrollBar)component;
switch (bar.getOrientation()) {
case Adjustable.HORIZONTAL:
return BOTTOM;
case Adjustable.VERTICAL:
return bar.getComponentOrientation().isLeftToRight()
? RIGHT
: LEFT;
}
}
}
return null;
}
}
/**
* ScrollPaneLayout implementation that supports
* ScrollBar flipping and non-opaque ScrollBars.
*/
private static class Layout extends ScrollPaneLayout {
private static final Insets EMPTY_INSETS = new Insets(0, 0, 0, 0);
@Override
public void layoutContainer(Container parent) {
JScrollPane pane = (JScrollPane)parent;
// Calculate inner bounds of the scroll pane
Rectangle bounds = new Rectangle(pane.getWidth(), pane.getHeight());
JBInsets.removeFrom(bounds, pane.getInsets());
// Determine positions of scroll bars on the scroll pane
Object property = pane.getClientProperty(Flip.class);
Flip flip = property instanceof Flip ? (Flip)property : Flip.NONE;
boolean hsbOnTop = flip == Flip.BOTH || flip == Flip.VERTICAL;
boolean vsbOnLeft = pane.getComponentOrientation().isLeftToRight()
? flip == Flip.BOTH || flip == Flip.HORIZONTAL
: flip == Flip.NONE || flip == Flip.VERTICAL;
// If there's a visible row header remove the space it needs.
// The row header is treated as if it were fixed width, arbitrary height.
Rectangle rowHeadBounds = new Rectangle(bounds.x, 0, 0, 0);
if (rowHead != null && rowHead.isVisible()) {
rowHeadBounds.width = min(bounds.width, rowHead.getPreferredSize().width);
bounds.width -= rowHeadBounds.width;
if (vsbOnLeft) {
rowHeadBounds.x += bounds.width;
}
else {
bounds.x += rowHeadBounds.width;
}
}
// If there's a visible column header remove the space it needs.
// The column header is treated as if it were fixed height, arbitrary width.
Rectangle colHeadBounds = new Rectangle(0, bounds.y, 0, 0);
if (colHead != null && colHead.isVisible()) {
colHeadBounds.height = min(bounds.height, colHead.getPreferredSize().height);
bounds.height -= colHeadBounds.height;
if (hsbOnTop) {
colHeadBounds.y += bounds.height;
}
else {
bounds.y += colHeadBounds.height;
}
}
// If there's a JScrollPane.viewportBorder, remove the space it occupies
Border border = pane.getViewportBorder();
Insets insets = border == null ? null : border.getBorderInsets(parent);
JBInsets.removeFrom(bounds, insets);
if (insets == null) insets = EMPTY_INSETS;
// At this point:
// colHeadBounds is correct except for its width and x
// rowHeadBounds is correct except for its height and y
// bounds - the space available for the viewport and scroll bars
// Once we're through computing the dimensions of these three parts
// we can go back and set the bounds for the corners and the dimensions of
// colHeadBounds.x, colHeadBounds.width, rowHeadBounds.y, rowHeadBounds.height.
boolean isEmpty = bounds.width < 0 || bounds.height < 0;
Component view = viewport == null ? null : viewport.getView();
Dimension viewPreferredSize = view == null ? new Dimension() : view.getPreferredSize();
if (view instanceof JComponent) JBViewport.fixPreferredSize(viewPreferredSize, (JComponent)view, vsb, hsb);
Dimension viewportExtentSize = viewport == null ? new Dimension() : viewport.toViewCoordinates(bounds.getSize());
// If the view is tracking the viewports width we don't bother with a horizontal scrollbar.
// If the view is tracking the viewports height we don't bother with a vertical scrollbar.
Scrollable scrollable = null;
boolean viewTracksViewportWidth = false;
boolean viewTracksViewportHeight = false;
// Don't bother checking the Scrollable methods if there is no room for the viewport,
// we aren't going to show any scroll bars in this case anyway.
if (!isEmpty && view instanceof Scrollable) {
scrollable = (Scrollable)view;
viewTracksViewportWidth = scrollable.getScrollableTracksViewportWidth();
viewTracksViewportHeight = scrollable.getScrollableTracksViewportHeight();
}
// If there's a vertical scroll bar and we need one, allocate space for it.
// A vertical scroll bar is considered to be fixed width, arbitrary height.
boolean vsbOpaque = false;
boolean vsbNeeded = false;
int vsbPolicy = pane.getVerticalScrollBarPolicy();
if (!isEmpty && vsbPolicy != VERTICAL_SCROLLBAR_NEVER) {
vsbNeeded = vsbPolicy == VERTICAL_SCROLLBAR_ALWAYS
|| !viewTracksViewportHeight && viewPreferredSize.height > viewportExtentSize.height;
}
Rectangle vsbBounds = new Rectangle(0, bounds.y - insets.top, 0, 0);
if (vsb != null) {
if (!SystemInfo.isMac && view instanceof JTable) vsb.setOpaque(true);
vsbOpaque = vsb.isOpaque();
if (vsbNeeded) {
adjustForVSB(bounds, insets, vsbBounds, vsbOpaque, vsbOnLeft);
if (vsbOpaque && viewport != null) {
viewportExtentSize = viewport.toViewCoordinates(bounds.getSize());
}
}
}
// If there's a horizontal scroll bar and we need one, allocate space for it.
// A horizontal scroll bar is considered to be fixed height, arbitrary width.
boolean hsbOpaque = false;
boolean hsbNeeded = false;
int hsbPolicy = pane.getHorizontalScrollBarPolicy();
if (!isEmpty && hsbPolicy != HORIZONTAL_SCROLLBAR_NEVER) {
hsbNeeded = hsbPolicy == HORIZONTAL_SCROLLBAR_ALWAYS
|| !viewTracksViewportWidth && viewPreferredSize.width > viewportExtentSize.width;
}
Rectangle hsbBounds = new Rectangle(bounds.x - insets.left, 0, 0, 0);
if (hsb != null) {
if (!SystemInfo.isMac && view instanceof JTable) hsb.setOpaque(true);
hsbOpaque = hsb.isOpaque();
if (hsbNeeded) {
adjustForHSB(bounds, insets, hsbBounds, hsbOpaque, hsbOnTop);
if (hsbOpaque && viewport != null) {
// If we added the horizontal scrollbar and reduced the vertical space
// we may have to add the vertical scrollbar, if that hasn't been done so already.
if (vsb != null && !vsbNeeded && vsbPolicy != VERTICAL_SCROLLBAR_NEVER) {
viewportExtentSize = viewport.toViewCoordinates(bounds.getSize());
vsbNeeded = viewPreferredSize.height > viewportExtentSize.height;
if (vsbNeeded) adjustForVSB(bounds, insets, vsbBounds, vsbOpaque, vsbOnLeft);
}
}
}
}
// Set the size of the viewport first, and then recheck the Scrollable methods.
// Some components base their return values for the Scrollable methods on the size of the viewport,
// so that if we don't ask after resetting the bounds we may have gotten the wrong answer.
if (viewport != null) {
viewport.setBounds(bounds);
if (scrollable != null && hsbOpaque && vsbOpaque) {
viewTracksViewportWidth = scrollable.getScrollableTracksViewportWidth();
viewTracksViewportHeight = scrollable.getScrollableTracksViewportHeight();
viewportExtentSize = viewport.toViewCoordinates(bounds.getSize());
boolean vsbNeededOld = vsbNeeded;
if (vsb != null && vsbPolicy == VERTICAL_SCROLLBAR_AS_NEEDED) {
boolean vsbNeededNew = !viewTracksViewportHeight && viewPreferredSize.height > viewportExtentSize.height;
if (vsbNeeded != vsbNeededNew) {
vsbNeeded = vsbNeededNew;
if (vsbNeeded) {
adjustForVSB(bounds, insets, vsbBounds, vsbOpaque, vsbOnLeft);
}
else if (vsbOpaque) {
bounds.width += vsbBounds.width;
}
if (vsbOpaque) viewportExtentSize = viewport.toViewCoordinates(bounds.getSize());
}
}
boolean hsbNeededOld = hsbNeeded;
if (hsb != null && hsbPolicy == HORIZONTAL_SCROLLBAR_AS_NEEDED) {
boolean hsbNeededNew = !viewTracksViewportWidth && viewPreferredSize.width > viewportExtentSize.width;
if (hsbNeeded != hsbNeededNew) {
hsbNeeded = hsbNeededNew;
if (hsbNeeded) {
adjustForHSB(bounds, insets, hsbBounds, hsbOpaque, hsbOnTop);
}
else if (hsbOpaque) {
bounds.height += hsbBounds.height;
}
if (hsbOpaque && vsb != null && !vsbNeeded && vsbPolicy != VERTICAL_SCROLLBAR_NEVER) {
viewportExtentSize = viewport.toViewCoordinates(bounds.getSize());
vsbNeeded = viewPreferredSize.height > viewportExtentSize.height;
if (vsbNeeded) adjustForVSB(bounds, insets, vsbBounds, vsbOpaque, vsbOnLeft);
}
}
}
if (hsbNeededOld != hsbNeeded || vsbNeededOld != vsbNeeded) {
viewport.setBounds(bounds);
// You could argue that we should recheck the Scrollable methods again until they stop changing,
// but they might never stop changing, so we stop here and don't do any additional checks.
}
}
}
// Set the bounds of the row header.
rowHeadBounds.y = bounds.y - insets.top;
rowHeadBounds.height = bounds.height + insets.top + insets.bottom;
if (rowHead != null) {
rowHead.setBounds(rowHeadBounds);
rowHead.putClientProperty(Alignment.class, vsbOnLeft ? Alignment.RIGHT : Alignment.LEFT);
}
// Set the bounds of the column header.
colHeadBounds.x = bounds.x - insets.left;
colHeadBounds.width = bounds.width + insets.left + insets.right;
if (colHead != null) {
colHead.setBounds(colHeadBounds);
colHead.putClientProperty(Alignment.class, hsbOnTop ? Alignment.BOTTOM : Alignment.TOP);
}
// Calculate overlaps for translucent scroll bars
int overlapWidth = 0;
int overlapHeight = 0;
if (vsbNeeded && !vsbOpaque && hsbNeeded && !hsbOpaque) {
overlapWidth = vsbBounds.width; // shrink horizontally
//overlapHeight = hsbBounds.height; // shrink vertically
}
// Set the bounds of the vertical scroll bar.
vsbBounds.y = bounds.y - insets.top;
vsbBounds.height = bounds.height + insets.top + insets.bottom;
if (vsb != null) {
vsb.setVisible(vsbNeeded);
if (vsbNeeded) {
if (vsbOpaque && colHead != null && UIManager.getBoolean("ScrollPane.fillUpperCorner")) {
if ((vsbOnLeft ? upperLeft : upperRight) == null) {
// This is used primarily for GTK L&F, which needs to extend
// the vertical scrollbar to fill the upper corner near the column header.
// Note that we skip this step (and use the default behavior)
// if the user has set a custom corner component.
if (!hsbOnTop) vsbBounds.y -= colHeadBounds.height;
vsbBounds.height += colHeadBounds.height;
}
}
int overlapY = !hsbOnTop ? 0 : overlapHeight;
vsb.setBounds(vsbBounds.x, vsbBounds.y + overlapY, vsbBounds.width, vsbBounds.height - overlapHeight);
vsb.putClientProperty(Alignment.class, vsbOnLeft ? Alignment.LEFT : Alignment.RIGHT);
}
// Modify the bounds of the translucent scroll bar.
if (!vsbOpaque) {
if (!vsbOnLeft) vsbBounds.x += vsbBounds.width;
vsbBounds.width = 0;
}
}
// Set the bounds of the horizontal scroll bar.
hsbBounds.x = bounds.x - insets.left;
hsbBounds.width = bounds.width + insets.left + insets.right;
if (hsb != null) {
hsb.setVisible(hsbNeeded);
if (hsbNeeded) {
if (hsbOpaque && rowHead != null && UIManager.getBoolean("ScrollPane.fillLowerCorner")) {
if ((vsbOnLeft ? lowerRight : lowerLeft) == null) {
// This is used primarily for GTK L&F, which needs to extend
// the horizontal scrollbar to fill the lower corner near the row header.
// Note that we skip this step (and use the default behavior)
// if the user has set a custom corner component.
if (!vsbOnLeft) hsbBounds.x -= rowHeadBounds.width;
hsbBounds.width += rowHeadBounds.width;
}
}
int overlapX = !vsbOnLeft ? 0 : overlapWidth;
hsb.setBounds(hsbBounds.x + overlapX, hsbBounds.y, hsbBounds.width - overlapWidth, hsbBounds.height);
hsb.putClientProperty(Alignment.class, hsbOnTop ? Alignment.TOP : Alignment.BOTTOM);
}
// Modify the bounds of the translucent scroll bar.
if (!hsbOpaque) {
if (!hsbOnTop) hsbBounds.y += hsbBounds.height;
hsbBounds.height = 0;
}
}
// Set the bounds of the corners.
if (lowerLeft != null) {
lowerLeft.setBounds(vsbOnLeft ? vsbBounds.x : rowHeadBounds.x,
hsbOnTop ? colHeadBounds.y : hsbBounds.y,
vsbOnLeft ? vsbBounds.width : rowHeadBounds.width,
hsbOnTop ? colHeadBounds.height : hsbBounds.height);
}
if (lowerRight != null) {
lowerRight.setBounds(vsbOnLeft ? rowHeadBounds.x : vsbBounds.x,
hsbOnTop ? colHeadBounds.y : hsbBounds.y,
vsbOnLeft ? rowHeadBounds.width : vsbBounds.width,
hsbOnTop ? colHeadBounds.height : hsbBounds.height);
}
if (upperLeft != null) {
upperLeft.setBounds(vsbOnLeft ? vsbBounds.x : rowHeadBounds.x,
hsbOnTop ? hsbBounds.y : colHeadBounds.y,
vsbOnLeft ? vsbBounds.width : rowHeadBounds.width,
hsbOnTop ? hsbBounds.height : colHeadBounds.height);
}
if (upperRight != null) {
upperRight.setBounds(vsbOnLeft ? rowHeadBounds.x : vsbBounds.x,
hsbOnTop ? hsbBounds.y : colHeadBounds.y,
vsbOnLeft ? rowHeadBounds.width : vsbBounds.width,
hsbOnTop ? hsbBounds.height : colHeadBounds.height);
}
if (!vsbOpaque && vsbNeeded || !hsbOpaque && hsbNeeded) {
fixComponentZOrder(vsb, 0);
fixComponentZOrder(viewport, -1);
}
}
private static void fixComponentZOrder(Component component, int index) {
if (component != null) {
Container parent = component.getParent();
synchronized (parent.getTreeLock()) {
if (index < 0) index += parent.getComponentCount();
parent.setComponentZOrder(component, index);
}
}
}
private void adjustForVSB(Rectangle bounds, Insets insets, Rectangle vsbBounds, boolean vsbOpaque, boolean vsbOnLeft) {
vsbBounds.width = !vsb.isEnabled() ? 0 : min(bounds.width, vsb.getPreferredSize().width);
if (vsbOnLeft) {
vsbBounds.x = bounds.x - insets.left/* + vsbBounds.width*/;
if (vsbOpaque) bounds.x += vsbBounds.width;
}
else {
vsbBounds.x = bounds.x + bounds.width + insets.right - vsbBounds.width;
}
if (vsbOpaque) bounds.width -= vsbBounds.width;
}
private void adjustForHSB(Rectangle bounds, Insets insets, Rectangle hsbBounds, boolean hsbOpaque, boolean hsbOnTop) {
hsbBounds.height = !hsb.isEnabled() ? 0 : min(bounds.height, hsb.getPreferredSize().height);
if (hsbOnTop) {
hsbBounds.y = bounds.y - insets.top/* + hsbBounds.height*/;
if (hsbOpaque) bounds.y += hsbBounds.height;
}
else {
hsbBounds.y = bounds.y + bounds.height + insets.bottom - hsbBounds.height;
}
if (hsbOpaque) bounds.height -= hsbBounds.height;
}
private static int min(int one, int two) {
return Math.max(0, Math.min(one, two));
}
}
/**
   * Indicates whether the specified event has not been consumed and carries no unexpected modifiers.
*
* @param event a mouse wheel event to check for validity
* @return {@code true} if the specified event is valid, {@code false} otherwise
*/
public static boolean isScrollEvent(@NotNull MouseWheelEvent event) {
if (event.isConsumed()) return false; // event should not be consumed already
    if (event.getWheelRotation() == 0) return false; // a non-zero rotation (forward or backward) is expected
return 0 == (SCROLL_MODIFIERS & event.getModifiers());
}
private static final int SCROLL_MODIFIERS = // event modifiers allowed during scrolling
~InputEvent.SHIFT_MASK & ~InputEvent.SHIFT_DOWN_MASK & // for horizontal scrolling
~InputEvent.BUTTON1_MASK & ~InputEvent.BUTTON1_DOWN_MASK; // for selection
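
  // For illustration: a wheel event whose only modifiers are SHIFT (horizontal
  // scrolling) and/or BUTTON1 (selection) yields 0 when masked with
  // SCROLL_MODIFIERS and is accepted; any other modifier, e.g. CTRL_MASK,
  // leaves a non-zero bit and the event is rejected by isScrollEvent.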
}
| hurricup/intellij-community | platform/platform-api/src/com/intellij/ui/components/JBScrollPane.java | Java | apache-2.0 | 33,437 |
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
<html><head>
<title>SQLite Database File Format Requirements</title>
<style type="text/css">
body {
margin: auto;
font-family: Verdana, sans-serif;
padding: 8px 1%;
}
a { color: #45735f }
a:visited { color: #734559 }
.logo { position:absolute; margin:3px; }
.tagline {
float:right;
text-align:right;
font-style:italic;
width:240px;
margin:12px;
margin-top:58px;
}
.toolbar {
font-variant: small-caps;
text-align: center;
line-height: 1.6em;
margin: 0;
padding:1px 8px;
}
.toolbar a { color: white; text-decoration: none; padding: 6px 12px; }
.toolbar a:visited { color: white; }
.toolbar a:hover { color: #80a796; background: white; }
.content { margin: 5%; }
.content dt { font-weight:bold; }
.content dd { margin-bottom: 25px; margin-left:20%; }
.content ul { padding:0px; padding-left: 15px; margin:0px; }
/* rounded corners */
.se { background: url(images/se.png) 100% 100% no-repeat #80a796}
.sw { background: url(images/sw.png) 0% 100% no-repeat }
.ne { background: url(images/ne.png) 100% 0% no-repeat }
.nw { background: url(images/nw.png) 0% 0% no-repeat }
</style>
<meta http-equiv="content-type" content="text/html; charset=UTF-8">
</head>
<body>
<div><!-- container div to satisfy validator -->
<a href="index.html">
<img class="logo" src="images/SQLite.gif" alt="SQLite Logo"
border="0"></a>
<div><!-- IE hack to prevent disappearing logo--></div>
<div class="tagline">Small. Fast. Reliable.<br>Choose any three.</div>
<table width=100% style="clear:both"><tr><td>
<div class="se"><div class="sw"><div class="ne"><div class="nw">
<div class="toolbar">
<a href="about.html">About</a>
<a href="sitemap.html">Sitemap</a>
<a href="docs.html">Documentation</a>
<a href="download.html">Download</a>
<a href="copyright.html">License</a>
<a href="news.html">News</a>
<a href="http://www.sqlite.org/cvstrac/index">Developers</a>
<a href="support.html">Support</a>
</div></div></div></div></div>
</td></tr></table>
<h1 align="center">
Requirements for the SQLite Database File Format
</h1>
<p>
This document contains detailed <a href="requirements.html">requirements</a> for the database
<a href="fileformat.html">file format</a> and the <a href="fileio.html">file I/O</a> characteristics of SQLite.
</p>
<hr>
<a name="H30010"></a>
<p><b>H30010:</b>
The system shall ensure that at the successful conclusion of a
database transaction the contents of the database file constitute
a <i>well-formed SQLite database file</i>.
<a name="H30020"></a>
<p><b>H30020:</b>
The system shall ensure that at the successful conclusion of a
database transaction the contents of the database file are a valid
serialization of the contents of the logical SQL database produced
by the transaction.
<a name="H30030"></a>
<p><b>H30030:</b>
The first 16 bytes of a well-formed database file contain the UTF-8
encoding of the string "SQLite format 3" followed by a single
nul-terminator byte.
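<p>
(For illustration: that 16-byte header is, in hexadecimal,
53 51 4c 69 74 65 20 66 6f 72 6d 61 74 20 33 00.)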
<a name="H30040"></a>
<p><b>H30040:</b>
The 19th byte (byte offset 18), the <i>file-format write version</i>,
of a well-formed database file contains the value 0x01.
<a name="H30050"></a>
<p><b>H30050:</b>
The 20th byte (byte offset 19), the <i>file-format read version</i>,
of a well-formed database file contains the value 0x01.
<a name="H30060"></a>
<p><b>H30060:</b>
The 21st byte (byte offset 20), the number of unused bytes on each
page, of a well-formed database file shall contain the value 0x00.
<a name="H30070"></a>
<p><b>H30070:</b>
The 22nd byte (byte offset 21), the maximum fraction of an index
B-Tree page to use for embedded content, of a well-formed database
file shall contain the value 0x40.
<a name="H30080"></a>
<p><b>H30080:</b>
The 23rd byte (byte offset 22), the minimum fraction of an index
B-Tree page to use for embedded content when using overflow pages,
of a well-formed database file contains the value 0x20.
<a name="H30090"></a>
<p><b>H30090:</b>
The 24th byte (byte offset 23), the minimum fraction of a table
B-Tree page to use for embedded content when using overflow pages,
of a well-formed database file contains the value 0x20.
<a name="H30100"></a>
<p><b>H30100:</b>
The 4 byte block starting at byte offset 24 of a well-formed
database file contains the <i>file change counter</i> formatted
as a 4-byte big-endian integer.
<a name="H30110"></a>
<p><b>H30110:</b>
The 4 byte block starting at byte offset 40 of a well-formed
database file contains the <i>schema version</i> formatted
as a 4-byte big-endian integer.
<a name="H30120"></a>
<p><b>H30120:</b>
The 4 byte block starting at byte offset 44 of a well-formed
database file, the <i>schema layer file format</i>, contains a
big-endian integer value between 1 and 4, inclusive.
<a name="H30130"></a>
<p><b>H30130:</b>
The 4 byte block starting at byte offset 48 of a well-formed
database file contains the <i>default pager cache size</i> formatted
as a 4-byte big-endian integer.
<a name="H30140"></a>
<p><b>H30140:</b>
The 4 byte block starting at byte offset 52 of a well-formed
database file contains the <i>auto-vacuum last root-page</i>
formatted as a 4-byte big-endian integer. If this value is non-zero,
the database is said to be an <i>auto-vacuum database</i>.
<a name="H30150"></a>
<p><b>H30150:</b>
The 4 byte block starting at byte offset 56 of a well-formed
database file, the <i>text encoding</i>, contains a big-endian integer
value between 1 and 3, inclusive.
<a name="H30160"></a>
<p><b>H30160:</b>
The 4 byte block starting at byte offset 60 of a well-formed
database file contains the <i>user cookie</i> formatted
as a 4-byte big-endian integer.
<a name="H30170"></a>
<p><b>H30170:</b>
The 4 byte block starting at byte offset 64 of a well-formed
database file, the <i>incremental vacuum flag</i>, contains a big-endian
integer value between 0 and 1, inclusive.
<a name="H30180"></a>
<p><b>H30180:</b>
In a well-formed non-autovacuum database (one with a zero stored
in the 4-byte big-endian integer value beginning at byte offset
52 of the database file header), the incremental vacuum flag is
set to 0.
<a name="H30190"></a>
<p><b>H30190:</b>
The <i>database page size</i> of a well-formed database, stored as a
2-byte big-endian unsigned integer at byte offset 16 of the file,
shall be an integer power of 2 between 512 and 32768, inclusive.
<a name="H30200"></a>
<p><b>H30200:</b>
The size of a <i>well formed database file</i> shall be an integer
multiple of the <i>database page size</i>.
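<p>
The following Python sketch is informative only; it is not part of the
requirements above, and the function and field names are invented for
illustration. It shows one plausible way a reader might decode the header
fields enumerated by H30030 through H30200.
<code>
import struct

def read_database_header(buf):
    # Informative decoder for the 100-byte file header (H30030-H30200).
    if buf[0:16] != b"SQLite format 3\x00":                    # H30030
        raise ValueError("bad header string")
    page_size = struct.unpack(">H", buf[16:18])[0]             # H30190
    if not (512 <= page_size <= 32768) or page_size & (page_size - 1):
        raise ValueError("page size must be a power of 2 in [512, 32768]")
    return {
        "page_size":       page_size,
        "write_version":   buf[18],                            # H30040: 0x01
        "read_version":    buf[19],                            # H30050: 0x01
        "change_counter":  struct.unpack(">I", buf[24:28])[0],   # H30100
        "free_page_count": struct.unpack(">I", buf[36:40])[0],   # H31310
        "schema_version":  struct.unpack(">I", buf[40:44])[0],   # H30110
        "schema_format":   struct.unpack(">I", buf[44:48])[0],   # H30120
        "autovacuum_root": struct.unpack(">I", buf[52:56])[0],   # H30140
        "text_encoding":   struct.unpack(">I", buf[56:60])[0],   # H30150
    }
</code>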
<a name="H30210"></a>
<p><b>H30210:</b>
Each page of a <i>well formed database file</i> is exactly one of a
<i>B-Tree page</i>, an <i>overflow page</i>, a <i>free page</i>, a
<i>pointer-map page</i> or the <i>locking page</i>.
<a name="H30220"></a>
<p><b>H30220:</b>
The database page that starts at byte offset 2<sup>30</sup>, the
<i>locking page</i>, is never used for any purpose.
<a name="H30230"></a>
<p><b>H30230:</b>
In a <i>well-formed database file</i>, the portion of the first
database page not consumed by the database file-header (all but the
first 100 bytes) contains the root node of a table B-Tree,
the <i>schema table</i>.
<a name="H30240"></a>
<p><b>H30240:</b>
All records stored in the <i>schema table</i> contain exactly five
fields.
<a name="H30250"></a>
<p><b>H30250:</b>
For each SQL table in the database apart from the schema table
itself ("sqlite_master"), the <i>schema table</i> of a <i>well-formed
database file</i> contains an associated record.
<a name="H30260"></a>
<p><b>H30260:</b>
The first field of each <i>schema table</i> record associated with an
SQL table shall be the text value "table".
<a name="H30270"></a>
<p><b>H30270:</b>
The second field of each <i>schema table</i> record associated with an
SQL table shall be a text value set to the name of the SQL table.
<a name="H30280"></a>
<p><b>H30280:</b>
In a <i>well-formed database file</i>, the third field of all
<i>schema table</i> records associated with SQL tables shall contain
the same value as the second field.
<a name="H30290"></a>
<p><b>H30290:</b>
In a <i>well-formed database file</i>, the fourth field of all
<i>schema table</i> records associated with SQL tables that are not
virtual tables contains the page number (an integer value) of the root
page of the associated <i>table B-Tree</i> structure within the
database file.
<a name="H30300"></a>
<p><b>H30300:</b>
If the associated database table is a virtual table, the fourth
field of the <i>schema table</i> record shall contain an SQL NULL
value.
<a name="H30310"></a>
<p><b>H30310:</b>
In a well-formed database, the fifth field of all <i>schema table</i>
records associated with SQL tables shall contain a "CREATE TABLE"
or "CREATE VIRTUAL TABLE" statment (a text value). The details
of the statement shall be such that executing the statement
would create a table of precisely the same name and schema as the
existing database table.
<a name="H30320"></a>
<p><b>H30320:</b>
For each PRIMARY KEY or UNIQUE constraint present in the definition
of each SQL table in the database, the schema table of a well-formed
database shall contain a record with the first field set to the text
value "index", and the second field set to a text value containing a
string of the form "sqlite_autoindex_<name>_<idx>", where
<name> is the name of the SQL table and <idx> is an
integer value.
<a name="H30330"></a>
<p><b>H30330:</b>
In a well-formed database, the third field of all schema table
records associated with SQL PRIMARY KEY or UNIQUE constraints shall
contain the name of the table to which the constraint applies (a
text value).
<a name="H30340"></a>
<p><b>H30340:</b>
In a well-formed database, the fourth field of all schema table
records associated with SQL PRIMARY KEY or UNIQUE constraints shall
contain the page number (an integer value) of the root page of the
associated index B-Tree structure.
<a name="H30350"></a>
<p><b>H30350:</b>
In a well-formed database, the fifth field of all schema table
records associated with SQL PRIMARY KEY or UNIQUE constraints shall
contain an SQL NULL value.
<a name="H30360"></a>
<p><b>H30360:</b>
For each SQL index in the database, the schema table of a well-formed
database shall contain a record with the first field set to the text
value "index" and the second field set to a text value containing the
name of the SQL index.
<a name="H30370"></a>
<p><b>H30370:</b>
In a well-formed database, the third field of all schema table
records associated with SQL indexes shall contain the name of the
SQL table that the index applies to.
<a name="H30380"></a>
<p><b>H30380:</b>
In a well-formed database, the fourth field of all schema table
records associated with SQL indexes shall contain the page number
(an integer value) of the root page of the associated index B-Tree
structure.
<a name="H30390"></a>
<p><b>H30390:</b>
In a well-formed database, the fifth field of all schema table
records associated with SQL indexes shall contain an SQL "CREATE
INDEX" statement (a text value). The details of the statement shall
be such that executing the statement would create an index of
precisely the same name and content as the existing database index.
<a name="H30400"></a>
<p><b>H30400:</b>
For each SQL view in the database, the schema table of a well-formed
database shall contain a record with the first field set to the text
value "view" and the second field set to a text value containing the
name of the SQL view.
<a name="H30410"></a>
<p><b>H30410:</b>
In a well-formed database, the third field of all schema table
records associated with SQL views shall contain the same value as
the second field.
<a name="H30420"></a>
<p><b>H30420:</b>
In a well-formed database, the fourth field of all schema table
records associated with SQL views shall contain the integer value 0.
<a name="H30430"></a>
<p><b>H30430:</b>
In a well-formed database, the fifth field of all schema table
records associated with SQL views shall contain an SQL "CREATE
VIEW" statement (a text value). The details of the statement shall
be such that executing the statement would create a view of
precisely the same name and definition as the existing database view.
<a name="H30440"></a>
<p><b>H30440:</b>
For each SQL trigger in the database, the schema table of a well-formed
database shall contain a record with the first field set to the text
value "trigger" and the second field set to a text value containing the
name of the SQL trigger.
<a name="H30450"></a>
<p><b>H30450:</b>
In a well-formed database, the third field of all schema table
records associated with SQL triggers shall contain the name of the
database table or view to which the trigger applies.
<a name="H30460"></a>
<p><b>H30460:</b>
In a well-formed database, the fourth field of all schema table
records associated with SQL triggers shall contain the integer value 0.
<a name="H30470"></a>
<p><b>H30470:</b>
In a well-formed database, the fifth field of all schema table
records associated with SQL triggers shall contain an SQL "CREATE
TRIGGER" statement (a text value). The details of the statement shall
be such that executing the statement would create a trigger of
precisely the same name and definition as the existing database trigger.
<a name="H30480"></a>
<p><b>H30480:</b>
In an auto-vacuum database, all pages that occur before the page
number stored in the <i>auto-vacuum last root-page</i> field
of the database file header (see H30140) must be either B-Tree <i>root
pages</i>, <i>pointer-map pages</i> or the <i>locking page</i>.
<a name="H30490"></a>
<p><b>H30490:</b>
In an auto-vacuum database, no B-Tree <i>root pages</i> may occur
on or after the page number stored in the <i>auto-vacuum last
root-page</i> field of the database file header (see H30140).
<a name="H30500"></a>
<p><b>H30500:</b>
As well as the <i>schema table</i>, a <i>well-formed database file</i>
contains <i>N</i> table B-Tree structures, where <i>N</i> is the
number of non-virtual tables in the logical database, excluding the
sqlite_master table but including sqlite_sequence and other system
tables.
<a name="H30510"></a>
<p><b>H30510:</b>
A well-formed database file contains <i>N</i> index B-Tree structures,
where <i>N</i> is the number of indexes in the logical database,
including indexes created by UNIQUE or PRIMARY KEY clauses in the
declaration of SQL tables.
<a name="H30520"></a>
<p><b>H30520:</b>
A 64-bit signed integer value stored in <i>variable length integer</i>
format consumes from 1 to 9 bytes of space.
<a name="H30530"></a>
<p><b>H30530:</b>
The most significant bit of all bytes except the last in a serialized
<i>variable length integer</i> is always set. Unless the serialized
form consumes the maximum 9 bytes available, the most significant
bit of the final byte of the representation is always cleared.
<a name="H30540"></a>
<p><b>H30540:</b>
The eight least significant bits of the 64-bit two's complement
representation of a value stored in a 9 byte <i>variable length
integer</i> are stored in the final byte (byte offset 8) of the
serialized <i>variable length integer</i>. The other 56 bits are
stored in the 7 least significant bits of each of the first 8 bytes
of the serialized <i>variable length integer</i>, in order from
most significant to least significant.
<a name="H30550"></a>
<p><b>H30550:</b>
A <i>variable length integer</i> that consumes less than 9 bytes of
space contains a value represented as an <i>N</i>-bit unsigned
integer, where <i>N</i> is equal to the number of bytes consumed by
the serial representation (between 1 and 8) multiplied by 7. The
<i>N</i> bits are stored in the 7 least significant bits of each
byte of the serial representation, from most to least significant.
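<p>
As an informative illustration of H30520 through H30550 (not a normative
part of this document), the following Python function decodes a
<i>variable length integer</i>; the function name is invented here.
<code>
def decode_varint(buf, offset=0):
    # Returns (value, bytes_consumed) for a variable length integer.
    value = 0
    for i in range(8):
        byte = buf[offset + i]
        value = (value << 7) | (byte & 0x7F)
        if byte & 0x80 == 0:
            # H30530: a cleared high bit marks the final byte of short forms.
            # H30550: the result is an unsigned (7 * bytes-consumed)-bit value.
            return value, i + 1
    # H30540: the 9th byte contributes all 8 of its bits, and the result is
    # interpreted as a 64-bit two's complement signed integer.
    value = (value << 8) | buf[offset + 8]
    if value >= 1 << 63:
        value -= 1 << 64
    return value, 9
</code>
<p>
A consequence of the requirements above is that only the 9-byte form can
represent a negative value; all shorter forms decode to non-negative integers.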
<a name="H30560"></a>
<p><b>H30560:</b>
A <i>database record</i> consists of a <i>database record header</i>,
followed by <i>database record data</i>. The first part of the
<i>database record header</i> is a <i>variable length integer</i>
containing the total size (including itself) of the header in bytes.
<a name="H30570"></a>
<p><b>H30570:</b>
Following the length field, the remainder of the <i>database record
header</i> is populated with <i>N</i> <i>variable length integer</i>
fields, where <i>N</i> is the number of database values stored in
the record.
<a name="H30580"></a>
<p><b>H30580:</b>
Following the <i>database record header</i>, the <i>database record
data</i> is made up of <i>N</i> variable length blobs of data, where
<i>N</i> is again the number of database values stored in the record.
The <i>n</i>th blob contains the data for the <i>n</i>th value in
the database record. The size and format of each blob of data is
encoded in the corresponding <i>variable length integer</i> field
in the <i>database record header</i>.
<a name="H30590"></a>
<p><b>H30590:</b>
A value of 0 stored within the <i>database record header</i> indicates
that the corresponding database value is an SQL NULL. In this case
the blob of data in the data area is 0 bytes in size.
<a name="H30600"></a>
<p><b>H30600:</b>
A value of 1 stored within the <i>database record header</i> indicates
that the corresponding database value is an SQL integer. In this case
the blob of data contains the integer value, formatted as a 1-byte
big-endian signed integer.
<a name="H30610"></a>
<p><b>H30610:</b>
A value of 2 stored within the <i>database record header</i> indicates
that the corresponding database value is an SQL integer. In this case
the blob of data contains the integer value, formatted as a 2-byte
big-endian signed integer.
<a name="H30620"></a>
<p><b>H30620:</b>
A value of 3 stored within the <i>database record header</i> indicates
that the corresponding database value is an SQL integer. In this case
the blob of data contains the integer value, formatted as a 3-byte
big-endian signed integer.
<a name="H30630"></a>
<p><b>H30630:</b>
A value of 4 stored within the <i>database record header</i> indicates
that the corresponding database value is an SQL integer. In this case
the blob of data contains the integer value, formatted as a 4-byte
big-endian signed integer.
<a name="H30640"></a>
<p><b>H30640:</b>
A value of 5 stored within the <i>database record header</i> indicates
that the corresponding database value is an SQL integer. In this case
the blob of data contains the integer value, formatted as a 6-byte
big-endian signed integer.
<a name="H30650"></a>
<p><b>H30650:</b>
A value of 6 stored within the <i>database record header</i> indicates
that the corresponding database value is an SQL integer. In this case
the blob of data contains the integer value, formatted as a 8-byte
big-endian signed integer.
<a name="H30660"></a>
<p><b>H30660:</b>
A value of 7 stored within the <i>database record header</i> indicates
that the corresponding database value is an SQL real (floating
point number). In this case the blob of data contains an 8-byte
IEEE floating point number, stored in big-endian byte order.
<a name="H30670"></a>
<p><b>H30670:</b>
A value of 8 stored within the <i>database record header</i> indicates
that the corresponding database value is an SQL integer, value 0.
In this case the blob of data in the data area is 0 bytes in size.
<a name="H30680"></a>
<p><b>H30680:</b>
A value of 9 stored within the <i>database record header</i> indicates
that the corresponding database value is an SQL integer, value 1.
In this case the blob of data in the data area is 0 bytes in size.
<a name="H30690"></a>
<p><b>H30690:</b>
An even value greater than or equal to 12 stored within the
<i>database record header</i> indicates that the corresponding
database value is an SQL blob field. The blob of data contains the
value data. The blob of data is exactly (<i>n</i>-12)/2 bytes
in size, where <i>n</i> is the integer value stored in the
<i>database record header</i>.
<a name="H30700"></a>
<p><b>H30700:</b>
An odd value greater than or equal to 13 stored within the
<i>database record header</i> indicates that the corresponding
database value is an SQL text field. The blob of data contains the
value text stored using the <i>database encoding</i>, with no
nul-terminator. The blob of data is exactly (<i>n</i>-13)/2 bytes
in size, where <i>n</i> is the integer value stored in the
<i>database record header</i>.
<a name="H30710"></a>
<p><b>H30710:</b>
In a well-formed database file, if the values 8 or 9 appear within
any <i>database record header</i> within the database, then the
<i>schema-layer file format</i> (stored at byte offset 44 of the
database file header) must be set to 4.
<a name="H30720"></a>
<p><b>H30720:</b>
In a well-formed database file, the values 10 and 11, and all
negative values may not appear within any <i>database record header</i>
in the database.
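<p>
The mapping from record header values to content types and sizes described by
H30590 through H30720 can be summarized by the following informative Python
sketch (the function name and type labels are illustrative only).
<code>
def serial_type_info(n):
    fixed = {
        0: ("NULL", 0),        # H30590
        1: ("int", 1),         # H30600
        2: ("int", 2),         # H30610
        3: ("int", 3),         # H30620
        4: ("int", 4),         # H30630
        5: ("int", 6),         # H30640
        6: ("int", 8),         # H30650
        7: ("real", 8),        # H30660
        8: ("int-zero", 0),    # H30670; requires schema format 4 (H30710)
        9: ("int-one", 0),     # H30680; requires schema format 4 (H30710)
    }
    if n in fixed:
        return fixed[n]
    if n >= 12 and n % 2 == 0:
        return ("blob", (n - 12) // 2)    # H30690
    if n >= 13:
        return ("text", (n - 13) // 2)    # H30700
    raise ValueError("prohibited by H30720: %r" % n)
</code>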
<a name="H30730"></a>
<p><b>H30730:</b>
The pages in an index B-Tree structure are arranged into a tree
structure such that all leaf pages are at the same depth.
<a name="H30740"></a>
<p><b>H30740:</b>
Each leaf node page in an index B-Tree contains one or more
B-Tree cells, where each cell contains a database record.
<a name="H30750"></a>
<p><b>H30750:</b>
Each internal node page in an index B-Tree contains one or more
B-Tree cells, where each cell contains a child page number, <i>C</i>,
and a database record <i>R</i>. All database records stored within
the sub-tree headed by page <i>C</i> are smaller than record <i>R</i>,
according to the index sort order (see below). Additionally, unless
<i>R</i> is the smallest database record stored on the internal node
page, all database records within the sub-tree headed by <i>C</i> are
greater than <i>R<sub>-1</sub></i>, where <i>R<sub>-1</sub></i> is the
largest database record on the internal node page that is smaller
than <i>R</i>.
<a name="H30760"></a>
<p><b>H30760:</b>
As well as child page numbers associated with B-Tree cells, each
internal node page in an index B-Tree contains the page number
of an extra child page, the <i>right-child page</i>. All database
records stored in all B-Tree cells within the sub-tree headed by the
<i>right-child page</i> are greater than all database records
stored within B-Tree cells on the internal node page.
<a name="H30770"></a>
<p><b>H30770:</b>
In a well-formed database, each index B-Tree contains a single entry
for each row in the indexed logical database table.
<a name="H30780"></a>
<p><b>H30780:</b>
Each <i>database record</i> (key) stored by an index B-Tree in a
well-formed database contains the same number of values, the number
of indexed columns plus one.
<a name="H30790"></a>
<p><b>H30790:</b>
The final value in each <i>database record</i> (key) stored by an
index B-Tree in a well-formed database contains the rowid (an integer
value) of the corresponding logical database row.
<a name="H30800"></a>
<p><b>H30800:</b>
The first <i>N</i> values in each <i>database record</i> (key)
stored in an index B-Tree, where <i>N</i> is the number of indexed
columns, contain the values of the indexed columns from the
corresponding logical database row, in the order specified for the
index.
<a name="H30810"></a>
<p><b>H30810:</b>
The <i>b-tree page flags</i> field (the first byte) of each database
page used as an internal node of an index B-Tree structure is set to
0x02.
<a name="H30820"></a>
<p><b>H30820:</b>
The <i>b-tree page flags</i> field (the first byte) of each database
page used as a leaf node of an index B-Tree structure is set to 0x0A.
<a name="H30830"></a>
<p><b>H30830:</b>
The first byte of each database page used as a B-Tree page contains
the <i>b-tree page flags</i> field. On page 1, the <i>b-tree page
flags</i> field is stored directly after the 100 byte file header
at byte offset 100.
<a name="H30840"></a>
<p><b>H30840:</b>
The number of B-Tree cells stored on a B-Tree page is stored as a
2-byte big-endian integer starting at byte offset 3 of the B-Tree
page. On page 1, this field is stored at byte offset 103.
<a name="H30850"></a>
<p><b>H30850:</b>
The 2-byte big-endian integer starting at byte offset 5 of each
B-Tree page contains the byte-offset from the start of the page
to the start of the <i>cell content area</i>, which consumes all space
from this offset to the end of the usable region of the page.
On page 1, this field is stored at byte offset 105. All B-Tree
cells on the page are stored within the cell-content area.
<a name="H30860"></a>
<p><b>H30860:</b>
On each page used as an internal node of a B-Tree structure, the
page number of the rightmost child node in the B-Tree structure is
stored as a 4-byte big-endian unsigned integer beginning at byte
offset 8 of the database page, or byte offset 108 on page 1.
<a name="H30870"></a>
<p><b>H30870:</b>
Immediately following the <i>page header</i> on each B-Tree page is the
<i>cell offset array</i>, consisting of <i>N</i> 2-byte big-endian
unsigned integers, where <i>N</i> is the number of cells stored
on the B-Tree page (H30840). On an internal node B-Tree page,
the cell offset array begins at byte offset 12, or on a leaf
page, byte offset 8. For the B-Tree node on page 1, these
offsets are 112 and 108, respectively.
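<p>
The page header layout described by H30810 through H30870 might be decoded as
in the following informative Python sketch (names invented for illustration;
error checking omitted).
<code>
import struct

def read_btree_page_header(page, pgno):
    base = 100 if pgno == 1 else 0                 # H30830: page 1 offset
    flags = page[base]                             # H30810/H30820, H31130/H31140
    is_leaf = flags in (0x0A, 0x0D)
    ncell = struct.unpack(">H", page[base + 3:base + 5])[0]        # H30840
    content = struct.unpack(">H", page[base + 5:base + 7])[0]      # H30850
    right_child = None
    if not is_leaf:                                # H30860: internal nodes only
        right_child = struct.unpack(">I", page[base + 8:base + 12])[0]
    array = base + (8 if is_leaf else 12)          # H30870: cell offset array
    offsets = [struct.unpack(">H", page[array + 2*i:array + 2*i + 2])[0]
               for i in range(ncell)]              # cell offsets (see H30900)
    return flags, right_child, content, offsets
</code>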
<a name="H30880"></a>
<p><b>H30880:</b>
The <i>cell offset array</i> and the <i>cell content area</i> (H30850)
may not overlap.
<a name="H30890"></a>
<p><b>H30890:</b>
Each value stored in the <i>cell offset array</i> must be greater
than or equal to the offset to the <i>cell content area</i> (H30850),
and less than the database <i>page size</i>.
<a name="H30900"></a>
<p><b>H30900:</b>
The <i>N</i> values stored within the <i>cell offset array</i> are the
byte offsets from the start of the B-Tree page to the beginning of
each of the <i>N</i> cells stored on the page.
<a name="H30910"></a>
<p><b>H30910:</b>
No two B-Tree cells may overlap.
<a name="H30920"></a>
<p><b>H30920:</b>
Within the <i>cell content area</i>, all blocks of contiguous
free-space (space not used by B-Tree cells) greater than 3 bytes in
size are linked together into a linked list, the <i>free block list</i>.
Such blocks of free space are known as <i>free blocks</i>.
<a name="H30930"></a>
<p><b>H30930:</b>
The first two bytes of each <i>free block</i> contain the offset
of the next <i>free block</i> in the <i>free block list</i> formatted
as a 2-byte big-endian integer, relative to the start of the database
page. If there is no next <i>free block</i>, then the first two
bytes are set to 0x00.
<a name="H30940"></a>
<p><b>H30940:</b>
The second two bytes (byte offsets 2 and 3) of each <i>free block</i>
contain the total size of the <i>free block</i>, formatted as a 2-byte
big-endian integer.
<a name="H30950"></a>
<p><b>H30950:</b>
On all B-Tree pages, the offset of the first <i>free block</i> in the
<i>free block list</i>, relative to the start of the database page,
is stored as a 2-byte big-endian integer starting at byte offset
1 of the database page. If there is no first <i>free block</i>
(because the <i>free block list</i> is empty), then the two bytes
at offsets 1 and 2 of the database page are set to 0x00. On page 1,
this field is stored at byte offset 101 of the page.
<a name="H30960"></a>
<p><b>H30960:</b>
Within the cell-content area, all blocks of contiguous free-space
(space not used by B-Tree cells) less than or equal to 3 bytes in
size are known as <i>fragments</i>. The total size of all
<i>fragments</i> on a B-Tree page is stored as a 1-byte unsigned
integer at byte offset 7 of the database page. On page 1, this
field is stored at byte offset 107.
<a name="H30970"></a>
<p><b>H30970:</b>
Each B-Tree cell belonging to an internal node page of an index
B-Tree consists of a 4-byte big-endian unsigned integer, the
<i>child page number</i>, followed by a <i>variable length integer</i>
field, followed by a <i>database record</i>. The
<i>variable length integer</i> field contains the length of the
database record in bytes.
<a name="H30980"></a>
<p><b>H30980:</b>
Each B-Tree cell belonging to a leaf page of an index B-Tree
consists of a <i>variable length integer</i> field, followed by
a <i>database record</i>. The <i>variable length integer</i> field
contains the length of the database record in bytes.
<a name="H30990"></a>
<p><b>H30990:</b>
If the database record stored in an index B-Tree page is
sufficiently small, then the entire cell is stored within the
index B-Tree page. Sufficiently small is defined as equal to or
less than <i>max-local</i>, where:
<code>
<i>max-local</i> := (<i>usable-size</i> - 12) * 64 / 255 - 23</code>
<a name="H31000"></a>
<p><b>H31000:</b>
If the database record stored as part of an index B-Tree cell is too
large to be stored entirely within the B-Tree page (as defined by
H30520), then only a prefix of the <i>database record</i> is stored
within the B-Tree page and the remainder stored in an <i>overflow
chain</i>. In this case, the database record prefix is immediately
followed by the page number of the first page of the
<i>overflow chain</i>, formatted as a 4-byte big-endian unsigned
integer.
<a name="H31010"></a>
<p><b>H31010:</b>
When a <i>database record</i> belonging to an index B-Tree cell is
stored partially within an <i>overflow page chain</i>, the size
of the prefix stored within the index B-Tree page is <i>N</i> bytes,
where <i>N</i> is calculated using the following algorithm:
<code>
<i>min-local</i> := (<i>usable-size</i> - 12) * 32 / 255 - 23
<i>max-local</i> := (<i>usable-size</i> - 12) * 64 / 255 - 23
<i>N</i> := <i>min-local</i> + ((<i>record-size</i> - <i>min-local</i>) % (<i>usable-size</i> - 4))
if( <i>N</i> > <i>max-local</i> ) <i>N</i> := <i>min-local</i></code>
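<p>
The local/overflow split defined by H30990 and H31010 can be restated as the
following informative Python sketch (integer division is assumed, and the
function name is invented for illustration).
<code>
def index_record_local_size(record_size, usable_size):
    # Bytes of an index B-Tree record stored on the B-Tree page itself;
    # the remainder, if any, goes to an overflow chain.
    max_local = (usable_size - 12) * 64 // 255 - 23       # H30990
    if record_size <= max_local:
        return record_size                                # entire record is local
    min_local = (usable_size - 12) * 32 // 255 - 23       # H31010
    n = min_local + (record_size - min_local) % (usable_size - 4)
    return n if n <= max_local else min_local
</code>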
<a name="H31020"></a>
<p><b>H31020:</b>
The pages in a table B-Tree structure are arranged into a tree
structure such that all leaf pages are at the same depth.
<a name="H31030"></a>
<p><b>H31030:</b>
Each leaf page in a table B-Tree structure contains one or more
B-Tree cells, where each cell contains a 64-bit signed integer key
value and a database record.
<a name="H31040"></a>
<p><b>H31040:</b>
Each internal node page in a table B-Tree structure contains one or
more B-Tree cells, where each cell contains a 64-bit signed integer
key value, <i>K</i>, and a child page number, <i>C</i>. All integer key
values in all B-Tree cells within the sub-tree headed by page <i>C</i>
are less than or equal to <i>K</i>. Additionally, unless <i>K</i>
is the smallest integer key value stored on the internal node page,
all integer keys within the sub-tree headed by <i>C</i> are greater
than <i>K<sub>-1</sub></i>, where <i>K<sub>-1</sub></i> is the largest
integer key on the internal node page that is smaller than <i>K</i>.
<a name="H31050"></a>
<p><b>H31050:</b>
As well as child page numbers associated with B-Tree cells, each
internal node page in a table B-Tree contains the page number
of an extra child page, the <i>right-child page</i>. All key values
in all B-Tree cells within the sub-tree headed by the <i>right-child
page</i> are greater than all key values stored within B-Tree cells
on the internal node page.
<a name="H31060"></a>
<p><b>H31060:</b>
In a well-formed database, each table B-Tree contains a single entry
for each row in the corresponding logical database table.
<a name="H31070"></a>
<p><b>H31070:</b>
The key value (a 64-bit signed integer) for each B-Tree entry is
the same as the value of the rowid field of the corresponding
logical database row.
<a name="H31080"></a>
<p><b>H31080:</b>
The SQL values serialized to make up each <i>database record</i>
stored as ancillary data in a table B-Tree shall be equal to the
values taken by the <i>N</i> leftmost columns of the corresponding
logical database row, where <i>N</i> is the number of values in the
database record.
<a name="H31090"></a>
<p><b>H31090:</b>
If a logical database table column is declared as an "INTEGER
PRIMARY KEY", then instead of its integer value, an SQL NULL
shall be stored in its place in any database records used as
ancillary data in a table B-Tree.
<a name="H31100"></a>
<p><b>H31100:</b>
If the database <i>schema layer file-format</i> (the value stored
as a 4-byte integer at byte offset 44 of the file header) is 1,
then all database records stored as ancillary data in a table
B-Tree structure have the same number of fields as there are
columns in the corresponding logical database table.
<a name="H31110"></a>
<p><b>H31110:</b>
If the database <i>schema layer file-format</i> value is two or
greater and the rightmost <i>M</i> columns of a row contain SQL NULL
values, then the corresponding record stored as ancillary data in
the table B-Tree has between <i>N</i>-<i>M</i> and <i>N</i> fields,
where <i>N</i> is the number of columns in the logical database
table.
<a name="H31120"></a>
<p><b>H31120:</b>
If the database <i>schema layer file-format</i> value is three or
greater and the rightmost <i>M</i> columns of a row contain their
default values according to the logical table declaration, then the
corresponding record stored as ancillary data in the table B-Tree
may have as few as <i>N</i>-<i>M</i> fields, where <i>N</i> is the
number of columns in the logical database table.
<a name="H31130"></a>
<p><b>H31130:</b>
In a <i>well-formed database file</i>, the first byte of each page used
as an internal node of a table B-Tree structure is set to 0x05.
<a name="H31140"></a>
<p><b>H31140:</b>
In a <i>well-formed database file</i>, the first byte of each page used
as a leaf node of a table B-Tree structure is set to 0x0D.
<a name="H31150"></a>
<p><b>H31150:</b>
B-Tree cells belonging to table B-Tree internal node pages consist
of exactly two fields, a 4-byte big-endian unsigned integer
immediately followed by a <i>variable length integer</i>. These
fields contain the child page number and key value respectively
(see H31040).
<a name="H31160"></a>
<p><b>H31160:</b>
B-Tree cells belonging to table B-Tree leaf node pages consist
of three fields, two <i>variable length integer</i> values
followed by a database record. The size of the database record
in bytes is stored in the first of the two
<i>variable length integer</i> fields. The second of the two
<i>variable length integer</i> fields contains the 64-bit signed
integer key (see H31030).
<a name="H31170"></a>
<p><b>H31170:</b>
If the size of the record stored in a table B-Tree leaf page cell
is less than or equal to (<i>usable page size</i>-35) bytes, then
the entire cell is stored on the B-Tree leaf page. In a well-formed
database, <i>usable page size</i> is the same as the database
<i>page size</i>.
<a name="H31180"></a>
<p><b>H31180:</b>
If a table B-Tree cell is too large to be stored entirely on
a leaf page (as defined by H31170), then a prefix of the cell
is stored on the leaf page, and the remainder stored in an
<i>overflow page chain</i>. In this case the cell prefix
stored on the B-Tree leaf page is immediately followed by a
4-byte big-endian unsigned integer containing the page number
of the first overflow page in the chain.
<a name="H31190"></a>
<p><b>H31190:</b>
When a table B-Tree cell is stored partially in an
<i>overflow page chain</i>, the prefix stored on the B-Tree
leaf page consists of the two <i>variable length integer</i> fields,
followed by the first <i>N</i> bytes of the database record, where
<i>N</i> is determined by the following algorithm:
<code>
<i>min-local</i> := (<i>usable-size</i> - 12) * 32 / 255 - 23
<i>max-local</i> := (<i>usable-size</i> - 35)
<i>N</i> := <i>min-local</i> + (<i>record-size</i> - <i>min-local</i>) % (<i>usable-size</i> - 4)
if( <i>N</i> > <i>max-local</i> ) <i>N</i> := <i>min-local</i>
</code>
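<p>
The analogous computation for table B-Tree leaf cells (H31170 and H31190) is
sketched below; as before, this is informative only and assumes integer
division.
<code>
def table_record_local_size(record_size, usable_size):
    max_local = usable_size - 35                          # H31170
    if record_size <= max_local:
        return record_size                                # entire cell is local
    min_local = (usable_size - 12) * 32 // 255 - 23       # H31190
    n = min_local + (record_size - min_local) % (usable_size - 4)
    return n if n <= max_local else min_local
</code>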
<a name="H31200"></a>
<p><b>H31200:</b>
A single <i>overflow page</i> may store up to <i>available-space</i>
bytes of database record data, where <i>available-space</i> is equal
to (<i>usable-size</i> - 4).
<a name="H31210"></a>
<p><b>H31210:</b>
When a database record is too large to store within a B-Tree page
(see H31170 and H31000), a prefix of the record is stored within
the B-Tree page and the remainder stored across <i>N</i> overflow
pages. In this case <i>N</i> is the minimum number of pages required
to store the portion of the record not stored on the B-Tree page,
given the maximum payload per overflow page defined by H31200.
<a name="H31220"></a>
<p><b>H31220:</b>
The list of overflow pages used to store a single database record
are linked together in a singly linked list known as an
<i>overflow chain</i>. The first four bytes of each page except the
last in an <i>overflow chain</i> are used to store the page number
of the next page in the linked list, formatted as an unsigned
big-endian integer. The first four bytes of the last page in an
<i>overflow chain</i> are set to 0x00.
<a name="H31230"></a>
<p><b>H31230:</b>
Each overflow page except the last in an <i>overflow chain</i>
contains <i>N</i> bytes of record data starting at byte offset 4 of
the page, where <i>N</i> is the maximum payload per overflow page,
as defined by H31200. The final page in an <i>overflow chain</i>
contains the remaining data, also starting at byte offset 4.
<a name="H31240"></a>
<p><b>H31240:</b>
All <i>free pages</i> in a <i>well-formed database file</i> are part of
the database <i>free page list</i>.
<a name="H31250"></a>
<p><b>H31250:</b>
Each free page is either a <i>free list trunk</i> page or a
<i>free list leaf</i> page.
<a name="H31260"></a>
<p><b>H31260:</b>
All <i>free list trunk</i> pages are linked together into a singly
linked list. The first 4 bytes of each page in the linked list
contains the page number of the next page in the list, formatted
as an unsigned big-endian integer. The first 4 bytes of the last
page in the linked list are set to 0x00.
<a name="H31270"></a>
<p><b>H31270:</b>
The second 4 bytes of each <i>free list trunk</i> page contains
the number of <i>free list leaf</i> page numbers stored on the free list
trunk page, formatted as an unsigned big-endian integer.
<a name="H31280"></a>
<p><b>H31280:</b>
Beginning at byte offset 8 of each <i>free list trunk</i> page are
<i>N</i> page numbers, each formatted as a 4-byte unsigned big-endian
integer, where <i>N</i> is the value described in requirement H31270.
<a name="H31290"></a>
<p><b>H31290:</b>
All page numbers stored on all <i>free list trunk</i> pages refer to
database pages that are <i>free list leaves</i>.
<a name="H31300"></a>
<p><b>H31300:</b>
The page number of each <i>free list leaf</i> page in a well-formed
database file appears exactly once within the set of page numbers
stored on <i>free list trunk</i> pages.
<a name="H31310"></a>
<p><b>H31310:</b>
The total number of pages in the free list, including all <i>free list
trunk</i> and <i>free list leaf</i> pages, is stored as a 4-byte unsigned
big-endian integer at offset 36 of the database file header.
<a name="H31320"></a>
<p><b>H31320:</b>
The page number of the first page in the linked list of <i>free list
trunk</i> pages is stored as a 4-byte big-endian unsigned integer at
offset 32 of the database file header. If there are no <i>free list
trunk</i> pages in the database file, then the value stored at
offset 32 of the database file header is 0.
<a name="H31330"></a>
<p><b>H31330:</b>
Non auto-vacuum databases do not contain pointer map pages.
<a name="H31340"></a>
<p><b>H31340:</b>
In an auto-vacuum database file, every <i>(num-entries + 1)</i>th
page beginning with page 2 is designated a pointer-map page, where
<i>num-entries</i> is calculated as:
<code>
<i>num-entries</i> := <i>database-usable-page-size</i> / 5
</code>
<a name="H31350"></a>
<p><b>H31350:</b>
In an auto-vacuum database file, each pointer-map page contains
a pointer map entry for each of the <i>num-entries</i> (defined by
H31340) pages that follow it, if they exist.
<a name="H31360"></a>
<p><b>H31360:</b>
Each pointer-map page entry consists of a 1-byte page type and a
4-byte parent page number, 5 bytes in total.
<a name="H31370"></a>
<p><b>H31370:</b>
Pointer-map entries are packed into the pointer-map page in order,
starting at offset 0. The entry associated with the database
page that immediately follows the pointer-map page is located at
offset 0. The entry for the following page is located at offset 5,
and so on.
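<p>
The following informative Python sketch locates the pointer-map entry for a
given page, following H31340 through H31370 (the function name is invented
for illustration).
<code>
def ptrmap_entry_location(pgno, usable_size):
    num_entries = usable_size // 5                        # H31340
    group = (pgno - 2) // (num_entries + 1)
    ptrmap_page = 2 + group * (num_entries + 1)           # pages 2, 2+n+1, ...
    if ptrmap_page == pgno:
        raise ValueError("a pointer-map page has no entry of its own")
    offset = (pgno - ptrmap_page - 1) * 5                 # H31360/H31370
    return ptrmap_page, offset
</code>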
<a name="H31380"></a>
<p><b>H31380:</b>
For each page except page 1 in an auto-vacuum database file that is
the root page of a B-Tree structure, the page type of the
corresponding pointer-map entry is set to the value 0x01 and the
parent page number is zero.
<a name="H31390"></a>
<p><b>H31390:</b>
For each page that is a part of an auto-vacuum database file free-list,
the page type of the corresponding pointer-map entry is set to the
value 0x02 and the parent page number is zero.
<a name="H31400"></a>
<p><b>H31400:</b>
For each page in a well-formed auto-vacuum database that is the first
page in an overflow chain, the page type of the corresponding
pointer-map entry is set to 0x03 and the parent page number field
is set to the page number of the B-Tree page that contains the start
of the B-Tree cell stored in the overflow-chain.
<a name="H31410"></a>
<p><b>H31410:</b>
For each page that is the second or a subsequent page in an overflow
chain, the page type of the corresponding pointer-map entry is set to
0x04 and the parent page number field is set to the page number of the
preceding page in the overflow chain.
<a name="H31420"></a>
<p><b>H31420:</b>
For each page that is not a root page but is a part of a B-Tree tree
structure (not part of an overflow chain), the page type of the
corresponding pointer-map entry is set to the value 0x05 and the parent
page number field is set to the page number of the parent node in the
B-Tree structure.
<a name="H32000"></a>
<p><b>H32000:</b>
If a <i>journal file</i> contains a well-formed <i>master-journal
pointer</i>, and the named <i>master-journal file</i> either does
not exist or does not contain the name of the <i>journal file</i>,
then the <i>journal file</i> shall be considered invalid.
<a name="H32010"></a>
<p><b>H32010:</b>
If the first 28 bytes of a <i>journal file</i> do not contain a well-formed
<i>journal header</i>, then the <i>journal file</i> shall be considered
invalid.
<a name="H32020"></a>
<p><b>H32020:</b>
If the journal file exists within the file-system and none of H32000,
H32010 or H33080 apply, then the journal file shall be considered valid.
<a name="H32030"></a>
<p><b>H32030:</b>
If there exists a valid <i>journal file</i> in the file-system, then the
database <i>page-size</i> in bytes used to interpret the <i>database image</i>
shall be the value stored as a 4-byte big-endian unsigned integer at byte
offset 24 of the <i>journal file</i>.
<a name="H32040"></a>
<p><b>H32040:</b>
If there exists a valid <i>journal file</i> in the file-system, then the
number of pages in the <i>database image</i> shall be the value stored as
a 4-byte big-endian unsigned integer at byte offset 16 of the
<i>journal file</i>.
<a name="H32050"></a>
<p><b>H32050:</b>
If there is no valid <i>journal file</i> in the file-system, then the
database <i>page-size</i> in bytes used to interpret the <i>database image</i>
shall be the value stored as a 2-byte big-endian unsigned integer at byte
offset 16 of the <i>database file</i>.
<a name="H32060"></a>
<p><b>H32060:</b>
If there is no valid <i>journal file</i> in the file-system, then the
number of pages in the <i>database image</i> shall be calculated by dividing
the size of the <i>database file</i> in bytes by the database <i>page-size</i>.
<a name="H32070"></a>
<p><b>H32070:</b>
If there exists a valid <i>journal file</i> in the file-system, then the
contents of each page of the <i>database image</i> for which there is a valid
<i>journal record</i> in the <i>journal file</i> shall be read from the
corresponding journal record.
<a name="H32080"></a>
<p><b>H32080:</b>
The contents of all <i>database image</i> pages for which there is no valid
<i>journal record</i> shall be read from the database file.
<a name="H32090"></a>
<p><b>H32090:</b>
A buffer of 28 bytes shall be considered a well-formed journal
header if it is not excluded by requirements H32180, H32190 or H32200.
<a name="H32180"></a>
<p><b>H32180:</b>
A buffer of 28 bytes shall only be considered a well-formed journal
header if the first eight bytes of the buffer contain the values 0xd9,
0xd5, 0x05, 0xf9, 0x20, 0xa1, 0x63, and 0xd7, respectively.
<a name="H32190"></a>
<p><b>H32190:</b>
A buffer of 28 bytes shall only be considered a well-formed journal
header if the value stored in the sector size field (the 4-byte big-endian
unsigned integer at offset 20 of the buffer) contains a value that
is an integer power of two greater than 512.
<a name="H32200"></a>
<p><b>H32200:</b>
A buffer of 28 bytes shall only be considered a well-formed journal
header if the value stored in the page size field (the 4-byte big-endian
unsigned integer at offset 24 of the buffer) contains a value that
is an integer power of two greater than 512.
<a name="H32100"></a>
<p><b>H32100:</b>
A buffer of (8 + page size) bytes shall be considered a well-formed journal
record if it is not excluded by requirements H32110 or H32120.
<a name="H32110"></a>
<p><b>H32110:</b>
A journal record shall only be considered to be well-formed if the page number
field contains a value other than zero and other than the locking-page number,
calculated
using the page size found in the first journal header of the journal file that
contains the journal record.
<a name="H32120"></a>
<p><b>H32120:</b>
A journal record shall only be considered to be well-formed if the checksum
field contains a value equal to the sum of the value stored in the
checksum-initializer field of the journal header that precedes the record
and the value stored in every 200th byte of the page data field (each
interpreted as an 8-bit unsigned integer), starting with byte offset
(page-size % 200) and
ending with the byte at byte offset (page-size - 200).
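<p>
An informative restatement of the H32120 checksum in Python follows; the
32-bit wrap is an assumption made for this sketch because the field is stored
as a 4-byte integer, and is not stated by the requirement itself.
<code>
def journal_record_checksum(checksum_init, page_data):
    total = checksum_init
    # Every 200th byte, from (page-size % 200) up to (page-size - 200).
    for i in range(len(page_data) % 200, len(page_data), 200):
        total += page_data[i]
    return total & 0xFFFFFFFF    # assumed 32-bit wrap
</code>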
<a name="H32130"></a>
<p><b>H32130:</b>
A buffer shall be considered to contain a well-formed master journal pointer
record if it is not excluded from this category by requirements H32140,
H32150, H32160 or H32170.
<a name="H32140"></a>
<p><b>H32140:</b>
A buffer shall only be considered to be a well-formed master journal pointer
if the final eight bytes of the buffer contain the values 0xd9, 0xd5, 0x05,
0xf9, 0x20, 0xa1, 0x63, and 0xd7, respectively.
<a name="H32150"></a>
<p><b>H32150:</b>
A buffer shall only be considered to be a well-formed master journal pointer
if the size of the buffer in bytes is equal to the value stored as a 4-byte
big-endian unsigned integer starting 16 bytes before the end of the buffer.
<a name="H32160"></a>
<p><b>H32160:</b>
A buffer shall only be considered to be a well-formed master journal pointer
if the first four bytes of the buffer, interpreted as a big-endian unsigned
integer, contain the page number of the locking page (the value
(1 + 2<sup>30</sup> / page-size), where page-size is the value stored in
the page-size field of the first journal header of the journal file).
<a name="H32170"></a>
<p><b>H32170:</b>
A buffer shall only be considered to be a well-formed master journal pointer
if the value stored as a 4-byte big-endian integer starting 12 bytes before
the end of the buffer is equal to the sum of all bytes, each interpreted
as an 8-bit unsigned integer, starting at offset 4 of the buffer and continuing
until offset (buffer-size - 16) (the 17th last byte of the buffer).
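<p>
Combining H32130 through H32170 gives the following informative validity
check for a candidate master journal pointer. The 32-bit wrap on the checksum
sum and the reading of H32150 are interpretations made for this sketch only.
<code>
import struct

JOURNAL_MAGIC = bytes([0xd9, 0xd5, 0x05, 0xf9, 0x20, 0xa1, 0x63, 0xd7])

def is_wellformed_master_journal_pointer(buf, page_size):
    if len(buf) < 20 or buf[-8:] != JOURNAL_MAGIC:        # H32140
        return False
    if struct.unpack(">I", buf[-16:-12])[0] != len(buf):  # H32150
        return False
    locking_page = 1 + (1 << 30) // page_size             # H32160
    if struct.unpack(">I", buf[0:4])[0] != locking_page:
        return False
    stored = struct.unpack(">I", buf[-12:-8])[0]          # H32170
    computed = sum(buf[4:len(buf) - 16])                  # up to the 17th-last byte
    return stored == computed & 0xFFFFFFFF                # assumed 32-bit wrap
</code>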
<a name="H32210"></a>
<p><b>H32210:</b>
A buffer shall be considered to contain a well-formed journal section
if it is not excluded from this category by requirements H32220, H32230 or
H32240.
<a name="H32220"></a>
<p><b>H32220:</b>
A buffer shall only be considered to contain a well-formed journal section
if the first 28 bytes of it contain a well-formed journal header.
<a name="H32230"></a>
<p><b>H32230:</b>
A buffer shall only be considered to contain a well-formed journal section
if, beginning at byte offset sector-size, it contains a sequence of
record-count well-formed journal records. In this case sector-size and
record-count are the integer values stored in the sector size and record
count fields of the journal section's journal header.
<a name="H32240"></a>
<p><b>H32240:</b>
A buffer shall only be considered to contain a well-formed journal section
if it is an integer multiple of sector-size bytes in size, where sector-size
is the value stored in the sector size field of the journal section's journal
header.
<a name="H32250"></a>
<p><b>H32250:</b>
A journal record found within a valid journal file shall be considered a valid
journal record if it is not excluded from this category by requirements H32260,
H32270 or H32280.
<a name="H32260"></a>
<p><b>H32260:</b>
A journal record shall only be considered a valid journal record if it and any
other journal records that occur before it within the same journal section are
well-formed.
<a name="H32270"></a>
<p><b>H32270:</b>
A journal record shall only be considered a valid journal record if the journal
section to which it belongs begins with a well-formed journal header.
<a name="H32280"></a>
<p><b>H32280:</b>
A journal record shall only be considered a valid journal record if all journal
sections that occur before the journal section containing the journal record
are well-formed journal sections.
<a name="H32290"></a>
<p><b>H32290:</b>
Two database images shall be considered to be equivalent if they (a) have the
same page size, (b) contain the same number of pages and (c) the content of
each page of the first database image that is not a free-list leaf page is
the same as the content of the corresponding page in the second database image.
<a name="H32300"></a>
<p><b>H32300:</b>
When writing to an SQLite database file-system representation in order to
replace database image A with database image B, the file-system representation
shall at all times contain a database image equivalent to either A or B.
<a name="H32310"></a>
<p><b>H32310:</b>
If, while writing to an SQLite database file-system representation in
order to replace database image A with database image B, an operating
system or power failure should occur, then following recovery the database
file-system representation shall contain a database image equivalent to
either A or B.
<a name="H32320"></a>
<p><b>H32320:</b>
When using the rollback-journal method to modify the file-system representation
of a database image, the database writer shall ensure that before the size of
the database file is modified, the first 28 bytes of the journal file contain a
stable valid journal header with the page-size and page-count fields set to
values corresponding to the original database image.
<a name="H32330"></a>
<p><b>H32330:</b>
When using the rollback-journal method to modify the file-system representation
of a database image, the database writer shall ensure that the first 28 bytes
of the journal file do not become unstable at any point after the size of the
database file is modified until the journal file is invalidated to commit the
transaction.
<a name="H32340"></a>
<p><b>H32340:</b>
When using the rollback-journal method to modify the file-system representation
of a database image, the database writer shall ensure that before any part of
the database file that contained a page of the original database image that was
not a free-list leaf page is overwritten or made unstable the journal file
contains a valid and stable journal record containing the original page data.
<a name="H32350"></a>
<p><b>H32350:</b>
When using the rollback-journal method to modify the file-system representation
of a database image, the database writer shall ensure that after any part of
the database file that contained a page of the original database image that was
not a free-list leaf page has been overwritten or made unstable the corresponding
journal record (see H32340) is not modified or made unstable.
<a name="H32360"></a>
<p><b>H32360:</b>
When using the rollback-journal method to modify the file-system representation
of a database image, the database writer shall ensure that before the database
file is truncated, the journal file contains stable valid journal records
corresponding to all pages of the original database image that were part of the
region being discarded by the truncate operation and were not free-list leaf
pages.
<a name="H32370"></a>
<p><b>H32370:</b>
When using the rollback-journal method to modify the file-system representation
of a database image, the database writer shall ensure that after the database
file has been truncated the journal records corresponding to pages from the
original database image that were part of the truncated region and were not
free-list leaf pages are not modified or made unstable.
<a name="H33000"></a>
<p><b>H33000:</b>
Before reading from a database file, a database reader shall establish a
SHARED or greater lock on the database file-system representation.
<a name="H33010"></a>
<p><b>H33010:</b>
Before writing to a database file, a database writer shall establish
an EXCLUSIVE lock on the database file-system representation.
<a name="H33020"></a>
<p><b>H33020:</b>
Before writing to a journal file, a database writer shall establish
a RESERVED, PENDING or EXCLUSIVE lock on the database file-system
representation.
<a name="H33030"></a>
<p><b>H33030:</b>
Before establishing a RESERVED or PENDING lock on a database file, a
database writer shall ensure that the database file contains a valid
database image.
<a name="H33060"></a>
<p><b>H33060:</b>
Before establishing a RESERVED or PENDING lock on a database file, a
database writer shall ensure that any journal file that may be present
is not a valid journal file.
<a name="H33080"></a>
<p><b>H33080:</b>
If another database client holds either a RESERVED or PENDING lock on the
database file-system representation, then any journal file that exists within
the file system shall be considered invalid.
<a name="H33040"></a>
<p><b>H33040:</b>
A database writer shall increment the value of the database header change
counter field (H30100) as part of the first database image modification
that it performs after obtaining an EXCLUSIVE lock.
<a name="H33050"></a>
<p><b>H33050:</b>
A database writer shall increment the value of the database schema version
field (H30110) as part of the first database image modification that includes
a schema change that it performs after obtaining an EXCLUSIVE lock.
<a name="H33070"></a>
<p><b>H33070:</b>
If a database writer is required by either H33050 or H33040 to increment a
database header field, and that header field already contains the maximum
value possible (0xFFFFFFFF, or 4294967295 for 32-bit unsigned integer
fields), "incrementing" the field shall be interpreted to mean setting it to
zero.
<a name="H35010"></a>
<p><b>H35010:</b>
Except for the read operation required by H35070 and those reads made
as part of opening a read-only transaction, SQLite shall ensure that
a <i>database connection</i> has an open read-only or read/write
transaction when any data is read from the <i>database file</i>.
<a name="H35020"></a>
<p><b>H35020:</b>
Aside from those read operations described by H35070 and H21XXX, SQLite
shall read data from the database file in aligned blocks of
<i>page-size</i> bytes, where <i>page-size</i> is the database page size
used by the database file.
<a name="H35030"></a>
<p><b>H35030:</b>
While opening a <i>read-only transaction</i>, after successfully
obtaining a <i>shared lock</i> on the database file, SQLite shall
attempt to detect and roll back a <i>hot journal file</i> associated
with the same database file.
<a name="H35040"></a>
<p><b>H35040:</b>
Assuming no errors have occurred, after attempting to detect and
roll back a <i>hot journal file</i>, if the <i>page cache</i> contains
any entries associated with the current <i>database connection</i>,
then SQLite shall validate the contents of the <i>page cache</i> by
testing the <i>file change counter</i>. This procedure is known as
<i>cache validation</i>.
<a name="H35050"></a>
<p><b>H35050:</b>
If the cache validation procedure prescribed by H35040 is required and
does not prove that the <i>page cache</i> entries associated with the
current <i>database connection</i> are valid, then SQLite shall discard
all entries associated with the current <i>database connection</i> from
the <i>page cache</i>.
<a name="H35060"></a>
<p><b>H35060:</b>
When a new <i>database connection</i> is required, SQLite shall attempt
to open a file-handle on the database file. If the attempt fails, then
no new <i>database connection</i> is created and an error is returned.
<a name="H35070"></a>
<p><b>H35070:</b>
When a new <i>database connection</i> is required, after opening the
new file-handle, SQLite shall attempt to read the first 100 bytes
of the database file. If the attempt fails for any reason other than
that the opened file is less than 100 bytes in size, then
the file-handle is closed, no new <i>database connection</i> is created
and an error is returned instead.
<a name="H35080"></a>
<p><b>H35080:</b>
If the <i>database file header</i> is successfully read from a newly
opened database file, the connection's <i>expected page-size</i> shall
be set to the value stored in the <i>page-size field</i> of the
database header.
<a name="H35090"></a>
<p><b>H35090:</b>
If the <i>database file header</i> cannot be read from a newly opened
database file (because the file is less than 100 bytes in size), the
connection's <i>expected page-size</i> shall be set to the compile-time
value of the SQLITE_DEFAULT_PAGESIZE option.
<a name="H35100"></a>
<p><b>H35100:</b>
When required to open a <i>read-only transaction</i> using a
<i>database connection</i>, SQLite shall first attempt to obtain
a <i>shared-lock</i> on the file-handle open on the database file.
<a name="H35110"></a>
<p><b>H35110:</b>
If, while opening a <i>read-only transaction</i>, SQLite fails to obtain
the <i>shared-lock</i> on the database file, then the process is
abandoned, no transaction is opened and an error is returned to the user.
<a name="H35120"></a>
<p><b>H35120:</b>
If, while opening a <i>read-only transaction</i>, SQLite encounters
an error while attempting to detect or roll back a <i>hot journal
file</i>, then the <i>shared-lock</i> on the database file is released,
no transaction is opened and an error is returned to the user.
<a name="H35130"></a>
<p><b>H35130:</b>
When required to end a <i>read-only transaction</i>, SQLite shall
relinquish the <i>shared lock</i> held on the database file by
calling the xUnlock() method of the file-handle.
<a name="H35140"></a>
<p><b>H35140:</b>
When required to attempt to detect a <i>hot-journal file</i>, SQLite
shall first use the xAccess() method of the VFS layer to check if a
journal file exists in the file-system.
<a name="H35150"></a>
<p><b>H35150:</b>
When required to attempt to detect a <i>hot-journal file</i>, if the
call to xAccess() required by H35140 indicates that a journal file does
not exist, then SQLite shall conclude that there is no <i>hot-journal
file</i> in the file system and therefore that no <i>hot journal
rollback</i> is required.
<a name="H35160"></a>
<p><b>H35160:</b>
When required to attempt to detect a <i>hot-journal file</i>, if the
call to xAccess() required by H35140 indicates that a journal file
is present, then the xCheckReservedLock() method of the database file
file-handle is invoked to determine whether or not some other
process is holding a <i>reserved</i> or greater lock on the database
file.
<a name="H35170"></a>
<p><b>H35170:</b>
If the call to xCheckReservedLock() required by H35160 indicates that
some other <i>database connection</i> is holding a <i>reserved</i>
or greater lock on the database file, then SQLite shall conclude that
there is no <i>hot journal file</i>. In this case the attempt to detect
a <i>hot journal file</i> is concluded.
<a name="H35180"></a>
<p><b>H35180:</b>
When a file-handle open on a database file is unlocked, if the
<i>page cache</i> contains one or more entries belonging to the
associated <i>database connection</i>, SQLite shall store the value
of the <i>file change counter</i> internally.
<a name="H35190"></a>
<p><b>H35190:</b>
When required to perform <i>cache validation</i> as part of opening
a <i>read transaction</i>, SQLite shall read a 16 byte block
starting at byte offset 24 of the <i>database file</i> using the xRead()
method of the <i>database connection's</i> file handle.
<a name="H35200"></a>
<p><b>H35200:</b>
While performing <i>cache validation</i>, after loading the 16 byte
block as required by H35190, SQLite shall compare the 32-bit big-endian
integer stored in the first 4 bytes of the block to the most
recently stored value of the <i>file change counter</i> (see H35180).
If the values are not the same, then SQLite shall conclude that
the contents of the cache are invalid.
<a name="H35210"></a>
<p><b>H35210:</b>
During the conclusion of a <i>read transaction</i>, before unlocking
the database file, SQLite shall set the connection's
<i>expected page size</i> to the current database <i>page-size</i>.
<a name="H35220"></a>
<p><b>H35220:</b>
As part of opening a new <i>read transaction</i>, immediately after
performing <i>cache validation</i>, if there is no data for database
page 1 in the <i>page cache</i>, SQLite shall read <i>N</i> bytes from
the start of the database file using the xRead() method of the
connection's file handle, where <i>N</i> is the connection's current
<i>expected page size</i> value.
<a name="H35230"></a>
<p><b>H35230:</b>
If page 1 data is read as required by H35220, and the value of the
<i>page-size</i> field that appears in the database file header that
consumes the first 100 bytes of the read block is not the same as the
connection's current <i>expected page size</i>, then the
<i>expected page size</i> is set to this value, the database file is
unlocked and the entire procedure to open a <i>read transaction</i>
is repeated.
<a name="H35240"></a>
<p><b>H35240:</b>
If page 1 data is read as required by H35220, and the value of the
<i>page-size</i> field that appears in the database file header that
consumes the first 100 bytes of the read block is the same as the
connection's current <i>expected page size</i>, then the block of data
read is stored in the <i>page cache</i> as page 1.
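<p>Requirements H35210 through H35240 form a retry loop: the connection
guesses the page size, reads page 1, and restarts the procedure if the
guess was wrong. The sketch below summarizes that loop. The abstract
helpers are assumptions of this note; the 2-byte big-endian
<i>page-size</i> field sits at offset 16 of the database file header.
<pre>
import java.io.IOException;

// Expected-page-size negotiation sketch (H35210-H35240).
abstract class PageSizeProbeSketch {
    int expectedPageSize;                           // set per H35210
    abstract void openReadTransactionUpToCacheValidation() throws IOException;
    abstract boolean pageCacheContainsPage1();
    abstract byte[] readFromDatabaseFile(long offset, int n) throws IOException;
    abstract void unlockDatabaseFile() throws IOException;
    abstract void storePage1InCache(byte[] data);

    void loadPage1() throws IOException {
        for (;;) {
            openReadTransactionUpToCacheValidation();
            if (pageCacheContainsPage1()) return;   // nothing to read
            byte[] page1 = readFromDatabaseFile(0, expectedPageSize);  // H35220
            int declared = ((page1[16] & 0xFF) << 8) | (page1[17] & 0xFF);
            if (declared != expectedPageSize) {     // H35230: guess was wrong
                expectedPageSize = declared;
                unlockDatabaseFile();               // repeat the entire procedure
                continue;
            }
            storePage1InCache(page1);               // H35240
            return;
        }
    }
}
</pre>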
<a name="H35270"></a>
<p><b>H35270:</b>
When required to <i>journal a database page</i>, SQLite shall first
append the <i>page number</i> of the page being journalled to the
<i>journal file</i>, formatted as a 4-byte big-endian unsigned integer,
using a single call to the xWrite method of the file-handle opened
on the journal file.
<a name="H35280"></a>
<p><b>H35280:</b>
When required to <i>journal a database page</i>, if the attempt to
append the <i>page number</i> to the journal file is successful,
then the current page data (<i>page-size</i> bytes) shall be appended
to the journal file, using a single call to the xWrite method of the
file-handle opened on the journal file.
<a name="H35290"></a>
<p><b>H35290:</b>
When required to <i>journal a database page</i>, if the attempt to
append the current page data to the journal file is successful,
then SQLite shall append a 4-byte big-endian integer checksum value
to the <i>journal file</i>, using a single call to the xWrite method
of the file-handle opened on the journal file.
<a name="H35300"></a>
<p><b>H35300:</b>
The checksum value written to the <i>journal file</i> by the write
required by H35290 shall be equal to the sum of the <i>checksum
initializer</i> field stored in the <i>journal header</i> (H35700) and
every 200th byte of the page data, beginning with the
(<i>page-size</i> % 200)th byte.
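<p>A <i>journal record</i> therefore consists of a 4-byte page number,
<i>page-size</i> bytes of page data and a 4-byte checksum, appended by
three separate xWrite() calls. The sketch below shows the record layout
and the H35300 checksum; the JournalFile type and its append() method
are assumptions of this note, not SQLite interfaces.
<pre>
import java.io.IOException;
import java.nio.ByteBuffer;

interface JournalFile { void append(byte[] block) throws IOException; } // assumed

final class JournalRecordSketch {
    // Appends one journal record (H35270-H35300).
    static void appendRecord(JournalFile jrnl, int pageNumber,
                             byte[] pageData, long checksumInit) throws IOException {
        jrnl.append(ByteBuffer.allocate(4).putInt(pageNumber).array()); // H35270
        jrnl.append(pageData);                                          // H35280
        long sum = checksumInit;                                        // H35300:
        for (int i = pageData.length % 200; i < pageData.length; i += 200) {
            sum += pageData[i] & 0xFF;        // every 200th byte of the page data
        }
        jrnl.append(ByteBuffer.allocate(4).putInt((int) sum).array());  // H35290
    }
}
</pre>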
<a name="H35350"></a>
<p><b>H35350:</b>
When required to open a <i>write transaction</i> on the database,
SQLite shall first open a <i>read transaction</i>, if the <i>database
connection</i> in question has not already opened one.
<a name="H35360"></a>
<p><b>H35360:</b>
When required to open a <i>write transaction</i> on the database, after
ensuring a <i>read transaction</i> has already been opened, SQLite
shall obtain a <i>reserved lock</i> on the database file by calling
the xLock method of the file-handle open on the database file.
<a name="H35370"></a>
<p><b>H35370:</b>
When required to open a <i>write transaction</i> on the database, after
obtaining a <i>reserved lock</i> on the database file, SQLite shall
open a read/write file-handle on the corresponding <i>journal file</i>.
<a name="H35380"></a>
<p><b>H35380:</b>
When required to open a <i>write transaction</i> on the database, after
opening a file-handle on the <i>journal file</i>, SQLite shall append
a <i>journal header</i> to the (currently empty) <i>journal file</i>.
<a name="H35400"></a>
<p><b>H35400:</b>
When a <i>database connection</i> is closed, SQLite shall close the
associated file handle at the VFS level.
<a name="H35420"></a>
<p><b>H35420:</b>
SQLite shall ensure that a <i>database connection</i> has an open
read-only or read/write transaction before using data stored in the <i>page
cache</i> to satisfy user queries.
<a name="H35430"></a>
<p><b>H35430:</b>
When a <i>database connection</i> is closed, all associated <i>page
cache</i> entries shall be discarded.
<a name="H35440"></a>
<p><b>H35440:</b>
If, while attempting to detect a <i>hot-journal file</i>, the call to
xCheckReservedLock() indicates that no process holds a <i>reserved</i>
or greater lock on the <i>database file</i>, then SQLite shall open
a file handle on the potentially hot journal file using the VFS xOpen()
method.
<a name="H35450"></a>
<p><b>H35450:</b>
After successfully opening a file-handle on a potentially hot journal
file, SQLite shall query the file for its size in bytes using the
xFileSize() method of the open file handle.
<a name="H35460"></a>
<p><b>H35460:</b>
If the size of a potentially hot journal file is revealed to be zero
bytes by a query required by H35450, then SQLite shall close the
file handle opened on the journal file and delete the journal file using
a call to the VFS xDelete() method. In this case SQLite shall conclude
that there is no <i>hot journal file</i>.
<a name="H35470"></a>
<p><b>H35470:</b>
If the size of a potentially hot journal file is revealed to be greater
than zero bytes by a query required by H35450, then SQLite shall attempt
to upgrade the <i>shared lock</i> held by the <i>database connection</i>
on the <i>database file</i> directly to an <i>exclusive lock</i>.
<a name="H35480"></a>
<p><b>H35480:</b>
If an attempt to upgrade to an <i>exclusive lock</i> prescribed by
H35470 fails for any reason, then SQLite shall release all locks held by
the <i>database connection</i> and close the file handle opened on the
<i>journal file</i>. The attempt to open a <i>read-only transaction</i>
shall be deemed to have failed and an error returned to the user.
<a name="H35490"></a>
<p><b>H35490:</b>
If, as part of the <i>hot journal file</i> detection process, the
attempt to upgrade to an <i>exclusive lock</i> mandated by H35470 is
successful, then SQLite shall query the file-system using the xAccess()
method of the VFS implementation to test whether or not the journal
file is still present in the file-system.
<a name="H35500"></a>
<p><b>H35500:</b>
If the xAccess() query required by H35490 reveals that the journal
file is still present in the file system, then SQLite shall conclude
that the journal file is a <i>hot journal file</i> that needs to
be rolled back. SQLite shall immediately begin <i>hot journal
rollback</i>.
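<p>Requirements H35140 through H35170 and H35440 through H35500 describe
a single detection procedure. The sketch below summarizes its control
flow. It is illustrative only: the Vfs and FileHandle types mirror the
xAccess()/xCheckReservedLock()/xOpen()/xFileSize()/xDelete()/xLock()
methods but are assumptions of this note, and the error cases of H35510
through H35570 are collapsed into a thrown IOException.
<pre>
import java.io.IOException;

interface Vfs {                          // assumed stand-in for the VFS layer
    boolean xAccess(String path) throws IOException;
    FileHandle xOpen(String path) throws IOException;
    void xDelete(String path) throws IOException;
}
interface FileHandle {                   // assumed stand-in for an open file
    boolean xCheckReservedLock() throws IOException;
    long xFileSize() throws IOException;
    void xLockExclusive() throws IOException;   // shared -> exclusive upgrade
    void close() throws IOException;
}

final class HotJournalDetectionSketch {
    // Returns true if a hot journal was detected and rollback must begin.
    static boolean detect(Vfs vfs, FileHandle db, String journalPath)
            throws IOException {
        if (!vfs.xAccess(journalPath)) return false;  // H35140, H35150
        if (db.xCheckReservedLock()) return false;    // H35160, H35170
        FileHandle jrnl = vfs.xOpen(journalPath);     // H35440
        if (jrnl.xFileSize() == 0) {                  // H35450
            jrnl.close();                             // H35460: empty journal is
            vfs.xDelete(journalPath);                 // deleted; not hot
            return false;
        }
        db.xLockExclusive();                          // H35470
        if (!vfs.xAccess(journalPath)) {              // H35490
            jrnl.close();                             // H35570: journal vanished,
            throw new IOException("SQLITE_BUSY");     // report SQLITE_BUSY
        }
        return true;              // H35500: hot journal; handle stays open for rollback
    }
}
</pre>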
<a name="H35510"></a>
<p><b>H35510:</b>
If the call to xAccess() required by H35140 fails (due to an IO error or
similar), then SQLite shall abandon the attempt to open a <i>read-only
transaction</i>, relinquish the <i>shared lock</i> held on the database
file and return an error to the user.
<a name="H35520"></a>
<p><b>H35520:</b>
If the call to xCheckReservedLock() required by H35160 fails (due to an
IO or other internal VFS error), then SQLite shall abandon the attempt
to open a <i>read-only transaction</i>, relinquish the <i>shared lock</i>
held on the database file and return an error to the user.
<a name="H35530"></a>
<p><b>H35530:</b>
If the call to xOpen() required by H35440 fails (due to an IO or other
internal VFS error), then SQLite shall abandon the attempt to open a
<i>read-only transaction</i>, relinquish the <i>shared lock</i> held on
the database file and return an error to the user.
<a name="H35540"></a>
<p><b>H35540:</b>
If the call to xFileSize() required by H35450 fails (due to an IO or
other internal VFS error), then SQLite shall abandon the attempt to open
a <i>read-only transaction</i>, relinquish the <i>shared lock</i> held on
the database file, close the file handle opened on the journal file and
return an error to the user.
<a name="H35550"></a>
<p><b>H35550:</b>
If the call to xDelete() required by H35460 fails (due to an IO or
other internal VFS error), then SQLite shall abandon the attempt to open
a <i>read-only transaction</i>, relinquish the <i>shared lock</i> held on
the database file and return an error to the user.
<a name="H35560"></a>
<p><b>H35560:</b>
If the call to xAccess() required by H35490 fails (due to an IO or
other internal VFS error), then SQLite shall abandon the attempt to open
a <i>read-only transaction</i>, relinquish the lock held on the
database file, close the file handle opened on the journal file and
return an error to the user.
<a name="H35570"></a>
<p><b>H35570:</b>
If the call to xAccess() required by H35490 reveals that the journal
file is no longer present in the file system, then SQLite shall abandon
the attempt to open a <i>read-only transaction</i>, relinquish the
lock held on the database file, close the file handle opened on the
journal file and return an SQLITE_BUSY error to the user.
<a name="H35580"></a>
<p><b>H35580:</b>
If an attempt to acquire a <i>reserved lock</i> prescribed by
requirement H35360 fails, then SQLite shall deem the attempt to
open a <i>write transaction</i> to have failed and return an error
to the user.
<a name="H35590"></a>
<p><b>H35590:</b>
When required to modify the contents of an existing database page that
existed and was not a <i>free-list leaf page</i> when the <i>write
transaction</i> was opened, SQLite shall journal the page if it has not
already been journalled within the current <i>write transaction</i>.
<a name="H35600"></a>
<p><b>H35600:</b>
When required to modify the contents of an existing database page,
SQLite shall update the cached version of the database page content
stored as part of the <i>page cache entry</i> associated with the page.
<a name="H35610"></a>
<p><b>H35610:</b>
When required to append a new database page to the database file,
SQLite shall create a new <i>page cache entry</i> corresponding to
the new page and insert it into the <i>page cache</i>. The <i>dirty
flag</i> of the new <i>page cache entry</i> shall be set.
<a name="H35620"></a>
<p><b>H35620:</b>
When required to truncate (remove) from the end of the database file a
database page that existed and was not a <i>free-list leaf page</i> when
the <i>write transaction</i> was opened, SQLite shall journal the page if
it has not already been journalled within the current <i>write
transaction</i>.
<a name="H35630"></a>
<p><b>H35630:</b>
When required to truncate a database page from the end of the database
file, SQLite shall discard the associated <i>page cache entry</i>
from the page cache.
<a name="H35640"></a>
<p><b>H35640:</b>
When required to purge a <i>non-writable dirty page</i> from the
<i>page cache</i>, SQLite shall <i>sync the journal file</i> before
proceeding with the write operation required by H35670.
<a name="H35660"></a>
<p><b>H35660:</b>
After <i>syncing the journal file</i> as required by H35640, SQLite
shall append a new <i>journal header</i> to the <i>journal file</i>
before proceeding with the write operation required by H35670.
<a name="H35670"></a>
<p><b>H35670:</b>
When required to purge a <i>page cache entry</i> that is a
<i>dirty page</i>, SQLite shall write the page data into the database
file, using a single call to the xWrite method of the <i>database
connection</i> file handle.
<a name="H35680"></a>
<p><b>H35680:</b>
When required to append a <i>journal header</i> to the <i>journal
file</i>, SQLite shall do so by writing a block of <i>sector-size</i>
bytes using a single call to the xWrite method of the file-handle
open on the <i>journal file</i>. The block of data written shall begin
at the smallest sector-size aligned offset at or following the current
end of the <i>journal file</i>.
<a name="H35690"></a>
<p><b>H35690:</b>
The first 8 bytes of the <i>journal header</i> required to be written
by H35680 shall contain the following values, in order from byte offset 0
to 7: 0xd9, 0xd5, 0x05, 0xf9, 0x20, 0xa1, 0x63 and 0xd7.
<a name="H35700"></a>
<p><b>H35700:</b>
Bytes 8-11 of the <i>journal header</i> required to be written by
H35680 shall each contain 0x00. (This is the initial value of the
<i>record count</i> field that H35760 later updates.)
<a name="H35710"></a>
<p><b>H35710:</b>
Bytes 12-15 of the <i>journal header</i> required to be written by
H35680 shall contain the number of pages that the database file
contained when the current <i>write-transaction</i> was started,
formatted as a 4-byte big-endian unsigned integer.
<a name="H35720"></a>
<p><b>H35720:</b>
Bytes 16-19 of the <i>journal header</i> required to be written by
H35680 shall contain pseudo-randomly generated values.
<a name="H35730"></a>
<p><b>H35730:</b>
Bytes 20-23 of the <i>journal header</i> required to be written by
H35680 shall contain the <i>sector size</i> used by the VFS layer,
formatted as a 4-byte big-endian unsigned integer.
<a name="H35740"></a>
<p><b>H35740:</b>
Bytes 24-27 of the <i>journal header</i> required to be written by
H35680 shall contain the <i>page size</i> used by the database at
the start of the <i>write transaction</i>, formatted as a 4-byte
big-endian unsigned integer.
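<p>Requirements H35690 through H35740 fix the layout of the first 28
bytes of the <i>journal header</i>. The sketch below builds such a
header block. It is illustrative only; in particular the use of
SecureRandom for the pseudo-random field is an assumption of this note.
<pre>
import java.nio.ByteBuffer;
import java.security.SecureRandom;

final class JournalHeaderSketch {
    // Builds the sector-size block that H35680 appends to the journal file.
    static byte[] build(int sectorSize, int dbPageCountAtStart, int pageSize) {
        byte[] block = new byte[sectorSize];
        ByteBuffer buf = ByteBuffer.wrap(block);           // big-endian by default
        buf.put(new byte[] {(byte) 0xd9, (byte) 0xd5, (byte) 0x05, (byte) 0xf9,
                            (byte) 0x20, (byte) 0xa1, (byte) 0x63, (byte) 0xd7}); // H35690
        buf.putInt(0);                                     // H35700: bytes 8-11, record count
        buf.putInt(dbPageCountAtStart);                    // H35710: bytes 12-15
        buf.putInt(new SecureRandom().nextInt());          // H35720: bytes 16-19
        buf.putInt(sectorSize);                            // H35730: bytes 20-23
        buf.putInt(pageSize);                              // H35740: bytes 24-27
        return block;
    }
}
</pre>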
<a name="H35750"></a>
<p><b>H35750:</b>
When required to <i>sync the journal file</i>, SQLite shall invoke the
xSync method of the file handle open on the <i>journal file</i>.
<a name="H35760"></a>
<p><b>H35760:</b>
When required to <i>sync the journal file</i>, after invoking the
xSync method as required by H35750, SQLite shall update the <i>record
count</i> of the <i>journal header</i> most recently written to the
<i>journal file</i>. The 4-byte field shall be updated to contain
the number of <i>journal records</i> that have been written to the
<i>journal file</i> since the <i>journal header</i> was written,
formatted as a 4-byte big-endian unsigned integer.
<a name="H35770"></a>
<p><b>H35770:</b>
When required to <i>sync the journal file</i>, after updating the
<i>record count</i> field of a <i>journal header</i> as required by
H35760, SQLite shall invoke the xSync method of the file handle open
on the <i>journal file</i>.
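<p>A <i>journal file sync</i> is therefore a three-step sequence: sync,
patch the <i>record count</i> field of the most recent header, sync
again. In sketch form (the abstract methods and fields are assumptions
of this note):
<pre>
import java.io.IOException;

// Journal sync sketch (H35750-H35770).
abstract class JournalSyncSketch {
    abstract void xSync() throws IOException;             // sync the journal file
    abstract void xWrite(long offset, byte[] data) throws IOException;
    long lastHeaderOffset;      // offset of the most recently written header
    int recordsSinceHeader;     // journal records written since that header

    void syncJournalFile() throws IOException {
        xSync();                                          // H35750
        xWrite(lastHeaderOffset + 8,                      // H35760: bytes 8-11
               java.nio.ByteBuffer.allocate(4).putInt(recordsSinceHeader).array());
        xSync();                                          // H35770
    }
}
</pre>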
<a name="H35780"></a>
<p><b>H35780:</b>
When required to upgrade to an <i>exclusive lock</i> as part of a write
transaction, SQLite shall first attempt to obtain a <i>pending lock</i>
on the database file, if one is not already held, by invoking the xLock
method of the file handle opened on the <i>database file</i>.
<a name="H35790"></a>
<p><b>H35790:</b>
When required to upgrade to an <i>exclusive lock</i> as part of a write
transaction, after successfully obtaining a <i>pending lock</i>, SQLite
shall attempt to obtain an <i>exclusive lock</i> by invoking the
xLock method of the file handle opened on the <i>database file</i>.
<a name="H35800"></a>
<p><b>H35800:</b>
When required to <i>commit a write-transaction</i>, SQLite shall
modify page 1 to increment the value stored in the <i>change counter</i>
field of the <i>database file header</i>.
<a name="H35810"></a>
<p><b>H35810:</b>
When required to <i>commit a write-transaction</i>, after incrementing
the <i>change counter</i> field, SQLite shall <i>sync the journal
file</i>.
<a name="H35820"></a>
<p><b>H35820:</b>
When required to <i>commit a write-transaction</i>, after <i>syncing
the journal file</i> as required by H35810, if an <i>exclusive lock</i>
on the database file is not already held, SQLite shall attempt to
<i>upgrade to an exclusive lock</i>.
<a name="H35830"></a>
<p><b>H35830:</b>
When required to <i>commit a write-transaction</i>, after <i>syncing
the journal file</i> as required by H35810 and ensuring that an
<i>exclusive lock</i> is held on the database file as required by
H35820, SQLite shall copy the contents of all <i>dirty pages</i>
stored in the <i>page cache</i> into the <i>database file</i> using
calls to the xWrite method of the <i>database connection</i> file
handle. Each call to xWrite shall write the contents of a single
<i>dirty page</i> (<i>page-size</i> bytes of data) to the database
file. Dirty pages shall be written in order of <i>page number</i>,
from lowest to highest.
<a name="H35840"></a>
<p><b>H35840:</b>
When required to <i>commit a write-transaction</i>, after copying the
contents of any <i>dirty pages</i> to the database file as required
by H35830, SQLite shall sync the database file by invoking the xSync
method of the <i>database connection</i> file handle.
<a name="H35850"></a>
<p><b>H35850:</b>
When required to <i>commit a write-transaction</i>, after syncing
the database file as required by H35840, SQLite shall close the
file-handle opened on the <i>journal file</i> and delete the
<i>journal file</i> from the file system via a call to the VFS
xDelete method.
<a name="H35860"></a>
<p><b>H35860:</b>
When required to <i>commit a write-transaction</i>, after deleting
the <i>journal file</i> as required by H35850, SQLite shall relinquish
all locks held on the <i>database file</i> by invoking the xUnlock
method of the <i>database connection</i> file handle.
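<p>Requirements H35800 through H35860 can be read as a single commit
sequence. The sketch below lists the steps in order; the abstract
helpers stand in for the mechanisms defined by the referenced
requirements and are assumptions of this note.
<pre>
import java.io.IOException;

// Commit sequence sketch (H35800-H35860).
abstract class CommitSketch {
    abstract void incrementChangeCounterOnPage1() throws IOException;    // H35800
    abstract void syncJournalFile() throws IOException;                  // H35810
    abstract boolean holdingExclusiveLock();
    abstract void upgradeToExclusiveLock() throws IOException;           // H35820
    abstract void writeDirtyPagesInPageNumberOrder() throws IOException; // H35830
    abstract void syncDatabaseFile() throws IOException;                 // H35840
    abstract void closeAndDeleteJournalFile() throws IOException;        // H35850
    abstract void unlockDatabaseFile() throws IOException;               // H35860

    final void commitWriteTransaction() throws IOException {
        incrementChangeCounterOnPage1();
        syncJournalFile();
        if (!holdingExclusiveLock()) {
            upgradeToExclusiveLock();     // via the pending lock, H35780/H35790
        }
        writeDirtyPagesInPageNumberOrder();
        syncDatabaseFile();
        closeAndDeleteJournalFile();
        unlockDatabaseFile();             // release all locks
    }
}
</pre>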
<hr><small><i>
This page last modified 2009/02/19 14:35:32 UTC
</i></small></div></body></html>
| josejamilena/pfc-jose | 1 - analisis de requisitos/SQLite/sqlite-3_6_14-docs/hlr30000.html | HTML | apache-2.0 | 78,228 |
---
title: Schnapper Rock B
subtitle: Schnapper Rock Rd, Albany
layout: default
modal-id: 3
date: 2015-07-01
thumbnail: dreams-thumbnail.png
bedroom: 5
bathroom: 3
livingroom: 3
parking: 2
landarea: 786
floorarea: 370
description: Sold.
images:
- url: assets/img/portfolio/238csrlo1.jpg
alt: Schnapper Rock
title: Schnapper Rock Rd, Albany
- url: assets/img/portfolio/238csro2.jpg
alt: Schnapper Rock
title: Schnapper Rock Rd, Albany
- url: assets/img/portfolio/238csrl3.jpg
alt: Schnapper Rock
title: Schnapper Rock Rd, Albany
- url: assets/img/portfolio/238csrl6.jpg
alt: Schnapper Rock
title: Schnapper Rock Rd, Albany
- url: assets/img/portfolio/238csrbd2.jpg
alt: Schnapper Rock
title: Schnapper Rock Rd, Albany
- url: assets/img/portfolio/238csrb2.jpg
alt: Schnapper Rock
title: Schnapper Rock Rd, Albany
---
| loql/ross-homes | _posts/2014-07-16-project-3.markdown | Markdown | apache-2.0 | 882 |
package cn.xishan.oftenporter.porter.core.init;
import cn.xishan.oftenporter.porter.core.advanced.IConfigData;
import com.alibaba.fastjson.JSON;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.util.*;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/**
* @author Created by https://github.com/CLovinr on 2018-12-21.
*/
public class DealSharpProperties
{
private static final Logger LOGGER = LoggerFactory.getLogger(DealSharpProperties.class);
private static class PropOne
{
private String propKey,
originValue;
private int startIndex, endIndex;
public PropOne(String propKey, String originValue, int startIndex, int endIndex)
{
this.propKey = propKey;
this.originValue = originValue;
this.startIndex = startIndex;
this.endIndex = endIndex;
}
public String getPropKey()
{
return propKey;
}
public String replace(String propValue)
{
String str = originValue.substring(0, startIndex) + propValue + originValue.substring(endIndex);
return str;
}
}
/**
* Replaces every #{propertyName} reference in the given string.
*
* @param string     the string containing #{...} references
* @param properties the values used for substitution; non-CharSequence
*                   values are serialized to JSON
* @param forEmpty   if not null, every reference whose property is absent
*                   is replaced with this value
* @return the string with all references substituted
*/
public static String replaceSharpProperties(String string, Map<String, ?> properties, String forEmpty)
{
for (Map.Entry<String, ?> entry : properties.entrySet())
{
if (string.contains("#{" + entry.getKey() + "}"))
{
String rs;
if (entry.getValue() instanceof CharSequence)
{
rs = String.valueOf(entry.getValue());
} else if (entry.getValue() == null)
{
rs = "";
} else
{
rs = JSON.toJSONString(entry.getValue());
}
string = string.replace("#{" + entry.getKey() + "}", rs);
}
}
if (forEmpty != null)
{
string = string.replaceAll("#\\{[^{}]+\\}", forEmpty);//去掉未设置的
}
return string;
}
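// Illustrative usage sketch (the values below are assumptions made for this
// example, not taken from the original project):
//
//   Map<String, Object> props = new HashMap<>();
//   props.put("host", "example.org");
//   props.put("port", 8080);        // non-CharSequence values are JSON-serialized
//   String url = replaceSharpProperties("http://#{host}:#{port}/#{path}", props, "");
//   // url == "http://example.org:8080//" -- #{path} has no entry in props,
//   // so the reference is replaced by forEmpty ("").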
/**
* Replaces #{properName} references in the values of the given map.
*
* @param srcMap        the map whose values have their references replaced
* @param propertiesMap the map supplying the property values
*/
public static void dealSharpProperties(Map srcMap, Map propertiesMap)
{
dealSharpProperties(srcMap, propertiesMap, false);
}
/**
* Replaces #{properName} references in the values of the given map.
*
* @param srcMap        the map whose values have their references replaced
* @param propertiesMap the map supplying the property values
* @param keepNotFound  whether to keep (leave unreplaced) references whose
*                      property is not found
*/
public static void dealSharpProperties(Map srcMap, Map propertiesMap, boolean keepNotFound)
{
Set<String> containsVar = null;
boolean isFirst = true;
boolean hasSet = true;
// Process properties: rescan values containing #{...} references until a
// full pass makes no further replacement (fixpoint).
while (hasSet)
{
hasSet = false;
Collection<String> nameCollection;
if (isFirst)
{
nameCollection = srcMap.keySet();
} else
{
nameCollection = containsVar;
}
containsVar = new HashSet<>();
for (String properName : nameCollection)
{
Object value = srcMap.get(properName);
if (!(value instanceof CharSequence))
{
continue;
}
String valueString = String.valueOf(value);
PropOne propOne = getPropertiesKey(valueString);
if (propOne != null && propOne.getPropKey().equals(properName))
{
throw new RuntimeException(
"cannot set property \"" + properName + "\" with value \"" + valueString + "\": the value references itself");
} else if (propOne != null)
{
containsVar.add(properName);
if (LOGGER.isDebugEnabled())
{
LOGGER.debug("replace sharp property:key={},replace-attr={},origin-value={}", properName,
propOne.getPropKey(), valueString);
}
String replaceStr = null;
if (propertiesMap.containsKey(propOne.getPropKey()))
{
replaceStr = String.valueOf(propertiesMap.get(propOne.getPropKey()));
} else
{
if (keepNotFound)
{
containsVar.remove(properName);
} else
{
replaceStr = "";
LOGGER.warn("proper value with key '{}' is empty", propOne.getPropKey());
}
}
if (replaceStr != null)
{
String newValue = propOne.replace(replaceStr);
srcMap.put(properName, newValue);
if (LOGGER.isDebugEnabled())
{
LOGGER.debug("replace sharp property:key={},new-value={}", properName, newValue);
}
}
hasSet = true;
}
}
isFirst = false;
}
}
static void dealProperties(IConfigData configData)
{
Set<String> containsVar = null;
boolean isFirst = true;
boolean hasSet = true;
// Process properties: rescan until a full pass makes no further replacement.
while (hasSet)
{
hasSet = false;
Collection<String> nameCollection;
if (isFirst)
{
nameCollection = configData.propertyNames();
} else
{
nameCollection = containsVar;
}
containsVar = new HashSet<>();
for (String properName : nameCollection)
{
Object value = configData.get(properName);
if (!(value instanceof CharSequence))
{
continue;
}
String valueString = String.valueOf(value);
PropOne propOne = getPropertiesKey(valueString);
if (propOne != null && propOne.getPropKey().equals(properName))
{
throw new RuntimeException(
"cannot set property \"" + properName + "\" with value \"" + valueString + "\": the value references itself");
} else if (propOne != null)
{
containsVar.add(properName);
if (LOGGER.isDebugEnabled())
{
LOGGER.debug("replace sharp property:key={},replace-attr={},origin-value={}", properName,
propOne.getPropKey(), valueString);
}
String replaceStr;
if (configData.contains(propOne.getPropKey()))
{
replaceStr = configData.getString(propOne.getPropKey());
} else
{
replaceStr = "";
LOGGER.warn("property value for key '{}' not found; substituting empty string", propOne.getPropKey());
}
String newValue = propOne.replace(replaceStr);
configData.set(properName, newValue);
if (LOGGER.isDebugEnabled())
{
LOGGER.debug("replace sharp property:key={},new-value={}", properName, newValue);
}
hasSet = true;
}
}
isFirst = false;
}
}
private static final Pattern PROPERTIES_PATTERN = Pattern.compile("#\\{([^{}]+)}");
private static PropOne getPropertiesKey(String value)
{
Matcher matcher = PROPERTIES_PATTERN.matcher(value);
if (matcher.find())
{
PropOne propOne = new PropOne(matcher.group(1).trim(), value, matcher.start(), matcher.end());
return propOne;
} else
{
return null;
}
}
}
| gzxishan/OftenPorter | Porter-Core/src/main/java/cn/xishan/oftenporter/porter/core/init/DealSharpProperties.java | Java | apache-2.0 | 8,870 |