src_fm_fc_ms_ff (string, 43 to 86.8k chars) | target (string, 20 to 276k chars) |
---|---|
Result { @SuppressWarnings("ConstantConditions") public static <T> Result<T> response(Response<T> response) { if (response == null) throw new NullPointerException("response == null"); return new Result<>(response, null); } private Result(@Nullable Response<T> response, @Nullable Throwable error); @SuppressWarnings("ConstantConditions") // Guarding API nullability. static Result<T> error(Throwable error); @SuppressWarnings("ConstantConditions") // Guarding API nullability. static Result<T> response(Response<T> response); @Nullable Response<T> response(); @Nullable Throwable error(); boolean isError(); @Override String toString(); } | @Test public void response() { Response<String> response = Response.success("Hi"); Result<String> result = Result.response(response); assertThat(result.isError()).isFalse(); assertThat(result.error()).isNull(); assertThat(result.response()).isSameAs(response); }
@Test public void nullResponseThrows() { try { Result.response(null); fail(); } catch (NullPointerException e) { assertThat(e).hasMessage("response == null"); } } |
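The row above pairs Result.response(...) with its happy-path and null-argument tests. A minimal usage sketch of the two Result factory methods, assuming the adapter-rxjava flavor of Result shown in the row (the printed output is illustrative):

```java
import java.io.IOException;

import retrofit2.Response;
import retrofit2.adapter.rxjava.Result;

public final class ResultExample {
  public static void main(String[] args) {
    // Wrap a synthetic successful response: isError() is false, error() is null.
    Result<String> ok = Result.response(Response.success("Hi"));
    System.out.println(ok.isError() + " " + ok.response().body());

    // Wrap a transport failure: isError() is true, response() is null.
    Result<String> failed = Result.error(new IOException("network down"));
    System.out.println(failed.isError() + " " + failed.error().getMessage());
  }
}
```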
MoshiConverterFactory extends Converter.Factory { public MoshiConverterFactory failOnUnknown() { return new MoshiConverterFactory(moshi, lenient, true, serializeNulls); } private MoshiConverterFactory(
Moshi moshi, boolean lenient, boolean failOnUnknown, boolean serializeNulls); static MoshiConverterFactory create(); @SuppressWarnings("ConstantConditions") // Guarding API nullability. static MoshiConverterFactory create(Moshi moshi); MoshiConverterFactory asLenient(); MoshiConverterFactory failOnUnknown(); MoshiConverterFactory withNullSerialization(); @Override Converter<ResponseBody, ?> responseBodyConverter(
Type type, Annotation[] annotations, Retrofit retrofit); @Override Converter<?, RequestBody> requestBodyConverter(
Type type,
Annotation[] parameterAnnotations,
Annotation[] methodAnnotations,
Retrofit retrofit); } | @Test public void failOnUnknown() throws IOException, InterruptedException { server.enqueue(new MockResponse().setBody("{\"taco\":\"delicious\"}")); Call<AnImplementation> call = serviceFailOnUnknown.anImplementation(new AnImplementation(null)); try { call.execute(); fail(); } catch (JsonDataException e) { assertThat(e).hasMessage("Cannot skip unexpected NAME at $."); } } |
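failOnUnknown() above returns a copy of the factory whose adapters reject unrecognised JSON properties, which is what the paired test triggers (JsonDataException: "Cannot skip unexpected NAME at $."). A minimal wiring sketch, assuming a placeholder base URL; the factory methods are the ones listed in the row:

```java
import retrofit2.Retrofit;
import retrofit2.converter.moshi.MoshiConverterFactory;

public final class MoshiSetup {
  public static Retrofit build(String baseUrl) {
    return new Retrofit.Builder()
        .baseUrl(baseUrl)
        // failOnUnknown() makes deserialization throw JsonDataException on unexpected names.
        .addConverterFactory(MoshiConverterFactory.create().failOnUnknown())
        .build();
  }
}
```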
Result { @SuppressWarnings("ConstantConditions") public static <T> Result<T> error(Throwable error) { if (error == null) throw new NullPointerException("error == null"); return new Result<>(null, error); } private Result(@Nullable Response<T> response, @Nullable Throwable error); @SuppressWarnings("ConstantConditions") // Guarding API nullability. static Result<T> error(Throwable error); @SuppressWarnings("ConstantConditions") // Guarding API nullability. static Result<T> response(Response<T> response); @Nullable Response<T> response(); @Nullable Throwable error(); boolean isError(); @Override String toString(); } | @Test public void error() { Throwable error = new IOException(); Result<Object> result = Result.error(error); assertThat(result.isError()).isTrue(); assertThat(result.error()).isSameAs(error); assertThat(result.response()).isNull(); }
@Test public void nullErrorThrows() { try { Result.error(null); fail(); } catch (NullPointerException e) { assertThat(e).hasMessage("error == null"); } } |
GuavaCallAdapterFactory extends CallAdapter.Factory { @Override public @Nullable CallAdapter<?, ?> get( Type returnType, Annotation[] annotations, Retrofit retrofit) { if (getRawType(returnType) != ListenableFuture.class) { return null; } if (!(returnType instanceof ParameterizedType)) { throw new IllegalStateException( "ListenableFuture return type must be parameterized" + " as ListenableFuture<Foo> or ListenableFuture<? extends Foo>"); } Type innerType = getParameterUpperBound(0, (ParameterizedType) returnType); if (getRawType(innerType) != Response.class) { return new BodyCallAdapter<>(innerType); } if (!(innerType instanceof ParameterizedType)) { throw new IllegalStateException( "Response must be parameterized" + " as Response<Foo> or Response<? extends Foo>"); } Type responseType = getParameterUpperBound(0, (ParameterizedType) innerType); return new ResponseCallAdapter<>(responseType); } private GuavaCallAdapterFactory(); static GuavaCallAdapterFactory create(); @Override @Nullable CallAdapter<?, ?> get(
Type returnType, Annotation[] annotations, Retrofit retrofit); } | @Test public void responseType() { Type bodyClass = new TypeToken<ListenableFuture<String>>() {}.getType(); assertThat(factory.get(bodyClass, NO_ANNOTATIONS, retrofit).responseType()) .isEqualTo(String.class); Type bodyWildcard = new TypeToken<ListenableFuture<? extends String>>() {}.getType(); assertThat(factory.get(bodyWildcard, NO_ANNOTATIONS, retrofit).responseType()) .isEqualTo(String.class); Type bodyGeneric = new TypeToken<ListenableFuture<List<String>>>() {}.getType(); assertThat(factory.get(bodyGeneric, NO_ANNOTATIONS, retrofit).responseType()) .isEqualTo(new TypeToken<List<String>>() {}.getType()); Type responseClass = new TypeToken<ListenableFuture<Response<String>>>() {}.getType(); assertThat(factory.get(responseClass, NO_ANNOTATIONS, retrofit).responseType()) .isEqualTo(String.class); Type responseWildcard = new TypeToken<ListenableFuture<Response<? extends String>>>() {}.getType(); assertThat(factory.get(responseWildcard, NO_ANNOTATIONS, retrofit).responseType()) .isEqualTo(String.class); Type resultClass = new TypeToken<ListenableFuture<Response<String>>>() {}.getType(); assertThat(factory.get(resultClass, NO_ANNOTATIONS, retrofit).responseType()) .isEqualTo(String.class); Type resultWildcard = new TypeToken<ListenableFuture<Response<? extends String>>>() {}.getType(); assertThat(factory.get(resultWildcard, NO_ANNOTATIONS, retrofit).responseType()) .isEqualTo(String.class); }
@Test public void nonListenableFutureReturnsNull() { CallAdapter<?, ?> adapter = factory.get(String.class, NO_ANNOTATIONS, retrofit); assertThat(adapter).isNull(); }
@Test public void rawTypeThrows() { Type observableType = new TypeToken<ListenableFuture>() {}.getType(); try { factory.get(observableType, NO_ANNOTATIONS, retrofit); fail(); } catch (IllegalStateException e) { assertThat(e) .hasMessage( "ListenableFuture return type must be parameterized as ListenableFuture<Foo> or ListenableFuture<? extends Foo>"); } }
@Test public void rawResponseTypeThrows() { Type observableType = new TypeToken<ListenableFuture<Response>>() {}.getType(); try { factory.get(observableType, NO_ANNOTATIONS, retrofit); fail(); } catch (IllegalStateException e) { assertThat(e) .hasMessage("Response must be parameterized as Response<Foo> or Response<? extends Foo>"); } } |
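get() above only adapts ListenableFuture return types: ListenableFuture&lt;Response&lt;T&gt;&gt; yields response type T, any other ListenableFuture&lt;T&gt; yields body type T, and raw (non-parameterized) types throw, exactly as the four tests check. A hedged sketch of a service this factory can adapt; the interface and endpoint names are made up:

```java
import com.google.common.util.concurrent.ListenableFuture;

import okhttp3.ResponseBody;
import retrofit2.Response;
import retrofit2.Retrofit;
import retrofit2.adapter.guava.GuavaCallAdapterFactory;
import retrofit2.http.GET;

public final class GuavaSetup {
  // Hypothetical service: both return shapes are supported by the adapter factory.
  interface Service {
    @GET("user") ListenableFuture<ResponseBody> body();
    @GET("user") ListenableFuture<Response<ResponseBody>> response();
  }

  public static Service create(String baseUrl) {
    Retrofit retrofit = new Retrofit.Builder()
        .baseUrl(baseUrl)
        .addCallAdapterFactory(GuavaCallAdapterFactory.create())
        .build();
    return retrofit.create(Service.class);
  }
}
```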
DefaultCallAdapterFactory extends CallAdapter.Factory { @Override public @Nullable CallAdapter<?, ?> get( Type returnType, Annotation[] annotations, Retrofit retrofit) { if (getRawType(returnType) != Call.class) { return null; } if (!(returnType instanceof ParameterizedType)) { throw new IllegalArgumentException( "Call return type must be parameterized as Call<Foo> or Call<? extends Foo>"); } final Type responseType = Utils.getParameterUpperBound(0, (ParameterizedType) returnType); final Executor executor = Utils.isAnnotationPresent(annotations, SkipCallbackExecutor.class) ? null : callbackExecutor; return new CallAdapter<Object, Call<?>>() { @Override public Type responseType() { return responseType; } @Override public Call<Object> adapt(Call<Object> call) { return executor == null ? call : new ExecutorCallbackCall<>(executor, call); } }; } DefaultCallAdapterFactory(@Nullable Executor callbackExecutor); @Override @Nullable CallAdapter<?, ?> get(
Type returnType, Annotation[] annotations, Retrofit retrofit); } | @Test public void rawTypeThrows() { try { factory.get(Call.class, NO_ANNOTATIONS, retrofit); fail(); } catch (IllegalArgumentException e) { assertThat(e) .hasMessage("Call return type must be parameterized as Call<Foo> or Call<? extends Foo>"); } }
@Test public void responseType() { Type classType = new TypeToken<Call<String>>() {}.getType(); assertThat(factory.get(classType, NO_ANNOTATIONS, retrofit).responseType()) .isEqualTo(String.class); Type wilcardType = new TypeToken<Call<? extends String>>() {}.getType(); assertThat(factory.get(wilcardType, NO_ANNOTATIONS, retrofit).responseType()) .isEqualTo(String.class); Type genericType = new TypeToken<Call<List<String>>>() {}.getType(); assertThat(factory.get(genericType, NO_ANNOTATIONS, retrofit).responseType()) .isEqualTo(new TypeToken<List<String>>() {}.getType()); }
@Test public void adaptedCallExecute() throws IOException { Type returnType = new TypeToken<Call<String>>() {}.getType(); CallAdapter<String, Call<String>> adapter = (CallAdapter<String, Call<String>>) factory.get(returnType, NO_ANNOTATIONS, retrofit); final Response<String> response = Response.success("Hi"); Call<String> call = adapter.adapt( new EmptyCall() { @Override public Response<String> execute() { return response; } }); assertThat(call.execute()).isSameAs(response); }
@Test public void adaptedCallCloneDeepCopy() { Type returnType = new TypeToken<Call<String>>() {}.getType(); CallAdapter<String, Call<String>> adapter = (CallAdapter<String, Call<String>>) factory.get(returnType, NO_ANNOTATIONS, retrofit); final AtomicBoolean cloned = new AtomicBoolean(); Call<String> delegate = new EmptyCall() { @Override public Call<String> clone() { cloned.set(true); return this; } }; Call<String> call = adapter.adapt(delegate); assertThat(call.clone()).isNotSameAs(call); assertTrue(cloned.get()); }
@Test public void adaptedCallCancel() { Type returnType = new TypeToken<Call<String>>() {}.getType(); CallAdapter<String, Call<String>> adapter = (CallAdapter<String, Call<String>>) factory.get(returnType, NO_ANNOTATIONS, retrofit); final AtomicBoolean canceled = new AtomicBoolean(); Call<String> delegate = new EmptyCall() { @Override public void cancel() { canceled.set(true); } }; Call<String> call = adapter.adapt(delegate); call.cancel(); assertTrue(canceled.get()); } |
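The default factory above wraps each Call so that callbacks are posted to the configured callback executor, unless the method carries @SkipCallbackExecutor, in which case the call is returned unwrapped. A short illustrative declaration of the two shapes (the endpoint path is hypothetical):

```java
import okhttp3.ResponseBody;
import retrofit2.Call;
import retrofit2.SkipCallbackExecutor;
import retrofit2.http.GET;

// Hypothetical service illustrating the executor decision made in get() above.
interface ExecutorExampleService {
  // Callbacks of enqueued calls are submitted to the Retrofit callback executor.
  @GET("jobs") Call<ResponseBody> onCallbackExecutor();

  // Annotated: the call is not wrapped, so callbacks run on the OkHttp dispatcher thread.
  @SkipCallbackExecutor
  @GET("jobs") Call<ResponseBody> onOkHttpThread();
}
```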
GuavaOptionalConverterFactory extends Converter.Factory { public static GuavaOptionalConverterFactory create() { return new GuavaOptionalConverterFactory(); } private GuavaOptionalConverterFactory(); static GuavaOptionalConverterFactory create(); @Override @Nullable Converter<ResponseBody, ?> responseBodyConverter(
Type type, Annotation[] annotations, Retrofit retrofit); } | @Test public void delegates() throws IOException { final Object object = new Object(); Retrofit retrofit = new Retrofit.Builder() .baseUrl(server.url("/")) .addConverterFactory( new Converter.Factory() { @Nullable @Override public Converter<ResponseBody, Object> responseBodyConverter( Type type, Annotation[] annotations, Retrofit retrofit) { if (getRawType(type) != Object.class) { return null; } return value -> object; } }) .addConverterFactory(GuavaOptionalConverterFactory.create()) .build(); server.enqueue(new MockResponse()); Service service = retrofit.create(Service.class); Optional<Object> optional = service.optional().execute().body(); assertThat(optional).isNotNull(); assertThat(optional.get()).isSameAs(object); } |
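As the delegation test shows, this factory never parses bytes itself: it looks up the next converter for the inner type and wraps that converter's result in a Guava Optional. A hedged setup sketch with a made-up endpoint:

```java
import com.google.common.base.Optional;

import okhttp3.ResponseBody;
import retrofit2.Call;
import retrofit2.Retrofit;
import retrofit2.converter.guava.GuavaOptionalConverterFactory;
import retrofit2.http.GET;

public final class OptionalSetup {
  // Hypothetical service returning an Optional-wrapped body.
  interface Service {
    @GET("thing") Call<Optional<ResponseBody>> thing();
  }

  public static Service create(String baseUrl) {
    Retrofit retrofit = new Retrofit.Builder()
        .baseUrl(baseUrl)
        // Deserialization of the inner type is delegated to the remaining converters.
        .addConverterFactory(GuavaOptionalConverterFactory.create())
        .build();
    return retrofit.create(Service.class);
  }
}
```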
Response { public static <T> Response<T> success(@Nullable T body) { return success( body, new okhttp3.Response.Builder() .code(200) .message("OK") .protocol(Protocol.HTTP_1_1) .request(new Request.Builder().url("http: .build()); } private Response(
okhttp3.Response rawResponse, @Nullable T body, @Nullable ResponseBody errorBody); static Response<T> success(@Nullable T body); static Response<T> success(int code, @Nullable T body); static Response<T> success(@Nullable T body, Headers headers); static Response<T> success(@Nullable T body, okhttp3.Response rawResponse); static Response<T> error(int code, ResponseBody body); static Response<T> error(ResponseBody body, okhttp3.Response rawResponse); okhttp3.Response raw(); int code(); String message(); Headers headers(); boolean isSuccessful(); @Nullable T body(); @Nullable ResponseBody errorBody(); @Override String toString(); } | @Test public void success() { Object body = new Object(); Response<Object> response = Response.success(body); assertThat(response.raw()).isNotNull(); assertThat(response.code()).isEqualTo(200); assertThat(response.message()).isEqualTo("OK"); assertThat(response.headers().size()).isZero(); assertThat(response.isSuccessful()).isTrue(); assertThat(response.body()).isSameAs(body); assertThat(response.errorBody()).isNull(); }
@Test public void successWithNullHeadersThrows() { try { Response.success("", (okhttp3.Headers) null); fail(); } catch (NullPointerException e) { assertThat(e).hasMessage("headers == null"); } }
@Test public void successWithNullRawResponseThrows() { try { Response.success("", (okhttp3.Response) null); fail(); } catch (NullPointerException e) { assertThat(e).hasMessage("rawResponse == null"); } }
@Test public void successWithErrorRawResponseThrows() { try { Response.success("", errorResponse); fail(); } catch (IllegalArgumentException e) { assertThat(e).hasMessage("rawResponse must be successful response"); } } |
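Response.success(body) synthesizes a raw HTTP 200 "OK" response around the body, which is why the test can assert code(), message() and headers() without any network; the other overloads accept an explicit code, headers, or raw response, with null and non-successful inputs rejected as the remaining tests show. A small sketch of building synthetic responses for tests, using only the overloads listed above:

```java
import okhttp3.Headers;
import retrofit2.Response;

public final class SyntheticResponses {
  public static void main(String[] args) {
    // Plain synthetic 200 OK wrapping the body.
    Response<String> ok = Response.success("Hi");

    // Explicit 2xx status code, and custom headers.
    Response<String> created = Response.success(201, "Hi");
    Response<String> traced = Response.success("Hi", Headers.of("X-Trace", "abc123"));

    System.out.println(ok.code() + " " + created.code() + " " + traced.headers().get("X-Trace"));
  }
}
```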
Response { public static <T> Response<T> error(int code, ResponseBody body) { Objects.requireNonNull(body, "body == null"); if (code < 400) throw new IllegalArgumentException("code < 400: " + code); return error( body, new okhttp3.Response.Builder() .body(new OkHttpCall.NoContentResponseBody(body.contentType(), body.contentLength())) .code(code) .message("Response.error()") .protocol(Protocol.HTTP_1_1) .request(new Request.Builder().url("http: .build()); } private Response(
okhttp3.Response rawResponse, @Nullable T body, @Nullable ResponseBody errorBody); static Response<T> success(@Nullable T body); static Response<T> success(int code, @Nullable T body); static Response<T> success(@Nullable T body, Headers headers); static Response<T> success(@Nullable T body, okhttp3.Response rawResponse); static Response<T> error(int code, ResponseBody body); static Response<T> error(ResponseBody body, okhttp3.Response rawResponse); okhttp3.Response raw(); int code(); String message(); Headers headers(); boolean isSuccessful(); @Nullable T body(); @Nullable ResponseBody errorBody(); @Override String toString(); } | @Test public void error() { MediaType plainText = MediaType.get("text/plain; charset=utf-8"); ResponseBody errorBody = ResponseBody.create(plainText, "Broken!"); Response<?> response = Response.error(400, errorBody); assertThat(response.raw()).isNotNull(); assertThat(response.raw().body().contentType()).isEqualTo(plainText); assertThat(response.raw().body().contentLength()).isEqualTo(7); try { response.raw().body().source(); fail(); } catch (IllegalStateException expected) { } assertThat(response.code()).isEqualTo(400); assertThat(response.message()).isEqualTo("Response.error()"); assertThat(response.headers().size()).isZero(); assertThat(response.isSuccessful()).isFalse(); assertThat(response.body()).isNull(); assertThat(response.errorBody()).isSameAs(errorBody); }
@Test public void nullErrorThrows() { try { Response.error(400, null); fail(); } catch (NullPointerException e) { assertThat(e).hasMessage("body == null"); } }
@Test public void errorWithSuccessCodeThrows() { ResponseBody errorBody = ResponseBody.create(null, "Broken!"); try { Response.error(200, errorBody); fail(); } catch (IllegalArgumentException e) { assertThat(e).hasMessage("code < 400: 200"); } }
@Test public void nullErrorWithRawResponseThrows() { try { Response.error(null, errorResponse); fail(); } catch (NullPointerException e) { assertThat(e).hasMessage("body == null"); } }
@Test public void errorWithNullRawResponseThrows() { ResponseBody errorBody = ResponseBody.create(null, "Broken!"); try { Response.error(errorBody, null); fail(); } catch (NullPointerException e) { assertThat(e).hasMessage("rawResponse == null"); } }
@Test public void errorWithSuccessRawResponseThrows() { ResponseBody errorBody = ResponseBody.create(null, "Broken!"); try { Response.error(errorBody, successResponse); fail(); } catch (IllegalArgumentException e) { assertThat(e).hasMessage("rawResponse should not be successful response"); } } |
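Response.error(code, body) enforces the invariants the tests check: the body must be non-null and the code must be 400 or greater, and the synthetic raw response carries a NoContentResponseBody so the error body cannot be consumed through raw(). A short sketch using only the listed API:

```java
import okhttp3.MediaType;
import okhttp3.ResponseBody;
import retrofit2.Response;

public final class SyntheticErrors {
  public static void main(String[] args) {
    ResponseBody body =
        ResponseBody.create(MediaType.get("text/plain; charset=utf-8"), "Broken!");

    Response<String> error = Response.error(404, body);
    // isSuccessful() is false, body() is null, errorBody() is the ResponseBody above.
    System.out.println(error.code() + " " + error.isSuccessful() + " " + (error.errorBody() == body));
  }
}
```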
RxJava3CallAdapterFactory extends CallAdapter.Factory { @SuppressWarnings("ConstantConditions") public static RxJava3CallAdapterFactory createWithScheduler(Scheduler scheduler) { if (scheduler == null) throw new NullPointerException("scheduler == null"); return new RxJava3CallAdapterFactory(scheduler, false); } private RxJava3CallAdapterFactory(@Nullable Scheduler scheduler, boolean isAsync); static RxJava3CallAdapterFactory create(); static RxJava3CallAdapterFactory createSynchronous(); @SuppressWarnings("ConstantConditions") // Guarding API nullability. static RxJava3CallAdapterFactory createWithScheduler(Scheduler scheduler); @Override @Nullable CallAdapter<?, ?> get(
Type returnType, Annotation[] annotations, Retrofit retrofit); } | @Test public void nullSchedulerThrows() { try { RxJava3CallAdapterFactory.createWithScheduler(null); fail(); } catch (NullPointerException e) { assertThat(e).hasMessage("scheduler == null"); } } |
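createWithScheduler(scheduler) rejects null and produces a factory whose reactive types are subscribed on the supplied scheduler. A minimal builder sketch; Schedulers.io() is only an example choice:

```java
import io.reactivex.rxjava3.schedulers.Schedulers;

import retrofit2.Retrofit;
import retrofit2.adapter.rxjava3.RxJava3CallAdapterFactory;

public final class RxJava3Setup {
  public static Retrofit build(String baseUrl) {
    return new Retrofit.Builder()
        .baseUrl(baseUrl)
        // Returned Observable/Single/etc. types subscribe on the given scheduler.
        .addCallAdapterFactory(RxJava3CallAdapterFactory.createWithScheduler(Schedulers.io()))
        .build();
  }
}
```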
Platform { static Platform get() { return PLATFORM; } Platform(boolean hasJava8Types); } | @Test public void isAndroid() { assertFalse(Platform.get() instanceof Platform.Android); } |
Invocation { public static Invocation of(Method method, List<?> arguments) { Objects.requireNonNull(method, "method == null"); Objects.requireNonNull(arguments, "arguments == null"); return new Invocation(method, new ArrayList<>(arguments)); } Invocation(Method method, List<?> arguments); static Invocation of(Method method, List<?> arguments); Method method(); List<?> arguments(); @Override String toString(); } | @Test public void nullMethod() { try { Invocation.of(null, Arrays.asList("one", "two")); fail(); } catch (NullPointerException expected) { assertThat(expected).hasMessage("method == null"); } }
@Test public void nullArguments() { try { Invocation.of(Example.class.getDeclaredMethods()[0], null); fail(); } catch (NullPointerException expected) { assertThat(expected).hasMessage("arguments == null"); } } |
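Invocation.of(method, arguments) rejects null inputs and defensively copies the argument list, as both tests verify. Retrofit tags each outgoing okhttp3.Request with an Invocation, so an interceptor can see which service method produced it; a hedged sketch of that pattern (the log output is illustrative):

```java
import java.io.IOException;

import okhttp3.Interceptor;
import okhttp3.Response;
import retrofit2.Invocation;

// Illustrative interceptor: reads the Invocation tag that Retrofit attaches to requests.
public final class InvocationLoggingInterceptor implements Interceptor {
  @Override public Response intercept(Chain chain) throws IOException {
    Invocation invocation = chain.request().tag(Invocation.class);
    if (invocation != null) {
      System.out.printf("%s.%s %s%n",
          invocation.method().getDeclaringClass().getSimpleName(),
          invocation.method().getName(),
          invocation.arguments());
    }
    return chain.proceed(chain.request());
  }
}
```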
HttpException extends RuntimeException { public @Nullable Response<?> response() { return response; } HttpException(Response<?> response); int code(); String message(); @Nullable Response<?> response(); } | @Test public void response() { Response<String> response = Response.success("Hi"); HttpException exception = new HttpException(response); assertThat(exception.code()).isEqualTo(200); assertThat(exception.message()).isEqualTo("OK"); assertThat(exception.response()).isSameAs(response); } |
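HttpException simply exposes the wrapped Response: code() and message() are copied from it and response() returns the same instance, as the test asserts. A tiny helper sketch for promoting non-2xx responses to exceptions (the helper itself is hypothetical):

```java
import retrofit2.HttpException;
import retrofit2.Response;

public final class HttpErrors {
  // Returns the body of a successful response, otherwise throws HttpException.
  static <T> T bodyOrThrow(Response<T> response) {
    if (!response.isSuccessful()) {
      throw new HttpException(response);
    }
    return response.body();
  }
}
```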
Retrofit { public HttpUrl baseUrl() { return baseUrl; } Retrofit(
okhttp3.Call.Factory callFactory,
HttpUrl baseUrl,
List<Converter.Factory> converterFactories,
List<CallAdapter.Factory> callAdapterFactories,
@Nullable Executor callbackExecutor,
boolean validateEagerly); @SuppressWarnings("unchecked") // Single-interface proxy creation guarded by parameter safety. T create(final Class<T> service); okhttp3.Call.Factory callFactory(); HttpUrl baseUrl(); List<CallAdapter.Factory> callAdapterFactories(); CallAdapter<?, ?> callAdapter(Type returnType, Annotation[] annotations); CallAdapter<?, ?> nextCallAdapter(
@Nullable CallAdapter.Factory skipPast, Type returnType, Annotation[] annotations); List<Converter.Factory> converterFactories(); Converter<T, RequestBody> requestBodyConverter(
Type type, Annotation[] parameterAnnotations, Annotation[] methodAnnotations); Converter<T, RequestBody> nextRequestBodyConverter(
@Nullable Converter.Factory skipPast,
Type type,
Annotation[] parameterAnnotations,
Annotation[] methodAnnotations); Converter<ResponseBody, T> responseBodyConverter(Type type, Annotation[] annotations); Converter<ResponseBody, T> nextResponseBodyConverter(
@Nullable Converter.Factory skipPast, Type type, Annotation[] annotations); Converter<T, String> stringConverter(Type type, Annotation[] annotations); @Nullable Executor callbackExecutor(); Builder newBuilder(); } | @Test public void baseUrlNullThrows() { try { new Retrofit.Builder().baseUrl((String) null); fail(); } catch (NullPointerException e) { assertThat(e).hasMessage("baseUrl == null"); } try { new Retrofit.Builder().baseUrl((HttpUrl) null); fail(); } catch (NullPointerException e) { assertThat(e).hasMessage("baseUrl == null"); } }
@Test public void baseUrlInvalidThrows() { try { new Retrofit.Builder().baseUrl("ftp: fail(); } catch (IllegalArgumentException ignored) { } }
@Test public void baseUrlNoTrailingSlashThrows() { try { new Retrofit.Builder().baseUrl("http: fail(); } catch (IllegalArgumentException e) { assertThat(e).hasMessage("baseUrl must end in /: http: } HttpUrl parsed = HttpUrl.get("http: try { new Retrofit.Builder().baseUrl(parsed); fail(); } catch (IllegalArgumentException e) { assertThat(e).hasMessage("baseUrl must end in /: http: } }
@Test public void baseUrlStringPropagated() { Retrofit retrofit = new Retrofit.Builder().baseUrl("http: HttpUrl baseUrl = retrofit.baseUrl(); assertThat(baseUrl).isEqualTo(HttpUrl.get("http: }
@Test public void baseHttpUrlPropagated() { HttpUrl url = HttpUrl.get("http: Retrofit retrofit = new Retrofit.Builder().baseUrl(url).build(); assertThat(retrofit.baseUrl()).isSameAs(url); }
@Test public void baseJavaUrlPropagated() throws MalformedURLException { URL url = new URL("http: Retrofit retrofit = new Retrofit.Builder().baseUrl(url).build(); assertThat(retrofit.baseUrl()).isEqualTo(HttpUrl.get("http: } |
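The builder-side checks exercised above reduce to three rules: baseUrl must be non-null, must parse as an HTTP or HTTPS URL, and must end in a trailing slash so relative paths resolve beneath it. A hedged sketch of the valid and invalid cases (example.com is a placeholder):

```java
import okhttp3.HttpUrl;
import retrofit2.Retrofit;

public final class BaseUrls {
  public static void main(String[] args) {
    // Valid: ends in '/' so relative paths like "user/repo" resolve under /api/v2/.
    Retrofit ok = new Retrofit.Builder().baseUrl("https://example.com/api/v2/").build();
    System.out.println(ok.baseUrl());

    // Also valid: an already-parsed HttpUrl is kept as the same instance.
    HttpUrl parsed = HttpUrl.get("https://example.com/");
    System.out.println(new Retrofit.Builder().baseUrl(parsed).build().baseUrl() == parsed);

    // Invalid: no trailing slash, so the builder throws IllegalArgumentException.
    try {
      new Retrofit.Builder().baseUrl("https://example.com/api/v2");
    } catch (IllegalArgumentException expected) {
      System.out.println(expected.getMessage());
    }
  }
}
```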
Retrofit { public @Nullable Executor callbackExecutor() { return callbackExecutor; } Retrofit(
okhttp3.Call.Factory callFactory,
HttpUrl baseUrl,
List<Converter.Factory> converterFactories,
List<CallAdapter.Factory> callAdapterFactories,
@Nullable Executor callbackExecutor,
boolean validateEagerly); @SuppressWarnings("unchecked") // Single-interface proxy creation guarded by parameter safety. T create(final Class<T> service); okhttp3.Call.Factory callFactory(); HttpUrl baseUrl(); List<CallAdapter.Factory> callAdapterFactories(); CallAdapter<?, ?> callAdapter(Type returnType, Annotation[] annotations); CallAdapter<?, ?> nextCallAdapter(
@Nullable CallAdapter.Factory skipPast, Type returnType, Annotation[] annotations); List<Converter.Factory> converterFactories(); Converter<T, RequestBody> requestBodyConverter(
Type type, Annotation[] parameterAnnotations, Annotation[] methodAnnotations); Converter<T, RequestBody> nextRequestBodyConverter(
@Nullable Converter.Factory skipPast,
Type type,
Annotation[] parameterAnnotations,
Annotation[] methodAnnotations); Converter<ResponseBody, T> responseBodyConverter(Type type, Annotation[] annotations); Converter<ResponseBody, T> nextResponseBodyConverter(
@Nullable Converter.Factory skipPast, Type type, Annotation[] annotations); Converter<T, String> stringConverter(Type type, Annotation[] annotations); @Nullable Executor callbackExecutor(); Builder newBuilder(); } | @Test public void callbackExecutorNullThrows() { try { new Retrofit.Builder().callbackExecutor(null); fail(); } catch (NullPointerException e) { assertThat(e).hasMessage("executor == null"); } } |
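callbackExecutor() returns whatever executor the builder was given (the builder rejects a null argument, as the test shows), and callbacks of enqueued calls are dispatched onto it. A minimal sketch using a single-thread executor as a stand-in:

```java
import java.util.concurrent.Executor;
import java.util.concurrent.Executors;

import retrofit2.Retrofit;

public final class CallbackExecutorSetup {
  public static Retrofit build(String baseUrl) {
    Executor callbackExecutor = Executors.newSingleThreadExecutor();
    return new Retrofit.Builder()
        .baseUrl(baseUrl)
        // Callbacks of enqueued calls are submitted to this executor.
        .callbackExecutor(callbackExecutor)
        .build();
  }
}
```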
CompletableFutureCallAdapterFactory extends CallAdapter.Factory { @Override public @Nullable CallAdapter<?, ?> get( Type returnType, Annotation[] annotations, Retrofit retrofit) { if (getRawType(returnType) != CompletableFuture.class) { return null; } if (!(returnType instanceof ParameterizedType)) { throw new IllegalStateException( "CompletableFuture return type must be parameterized" + " as CompletableFuture<Foo> or CompletableFuture<? extends Foo>"); } Type innerType = getParameterUpperBound(0, (ParameterizedType) returnType); if (getRawType(innerType) != Response.class) { return new BodyCallAdapter<>(innerType); } if (!(innerType instanceof ParameterizedType)) { throw new IllegalStateException( "Response must be parameterized" + " as Response<Foo> or Response<? extends Foo>"); } Type responseType = getParameterUpperBound(0, (ParameterizedType) innerType); return new ResponseCallAdapter<>(responseType); } @Override @Nullable CallAdapter<?, ?> get(
Type returnType, Annotation[] annotations, Retrofit retrofit); } | @Test public void responseType() { Type bodyClass = new TypeToken<CompletableFuture<String>>() {}.getType(); assertThat(factory.get(bodyClass, NO_ANNOTATIONS, retrofit).responseType()) .isEqualTo(String.class); Type bodyWildcard = new TypeToken<CompletableFuture<? extends String>>() {}.getType(); assertThat(factory.get(bodyWildcard, NO_ANNOTATIONS, retrofit).responseType()) .isEqualTo(String.class); Type bodyGeneric = new TypeToken<CompletableFuture<List<String>>>() {}.getType(); assertThat(factory.get(bodyGeneric, NO_ANNOTATIONS, retrofit).responseType()) .isEqualTo(new TypeToken<List<String>>() {}.getType()); Type responseClass = new TypeToken<CompletableFuture<Response<String>>>() {}.getType(); assertThat(factory.get(responseClass, NO_ANNOTATIONS, retrofit).responseType()) .isEqualTo(String.class); Type responseWildcard = new TypeToken<CompletableFuture<Response<? extends String>>>() {}.getType(); assertThat(factory.get(responseWildcard, NO_ANNOTATIONS, retrofit).responseType()) .isEqualTo(String.class); Type resultClass = new TypeToken<CompletableFuture<Response<String>>>() {}.getType(); assertThat(factory.get(resultClass, NO_ANNOTATIONS, retrofit).responseType()) .isEqualTo(String.class); Type resultWildcard = new TypeToken<CompletableFuture<Response<? extends String>>>() {}.getType(); assertThat(factory.get(resultWildcard, NO_ANNOTATIONS, retrofit).responseType()) .isEqualTo(String.class); }
@Test public void nonListenableFutureReturnsNull() { CallAdapter<?, ?> adapter = factory.get(String.class, NO_ANNOTATIONS, retrofit); assertThat(adapter).isNull(); }
@Test public void rawTypeThrows() { Type observableType = new TypeToken<CompletableFuture>() {}.getType(); try { factory.get(observableType, NO_ANNOTATIONS, retrofit); fail(); } catch (IllegalStateException e) { assertThat(e) .hasMessage( "CompletableFuture return type must be parameterized as CompletableFuture<Foo> or CompletableFuture<? extends Foo>"); } }
@Test public void rawResponseTypeThrows() { Type observableType = new TypeToken<CompletableFuture<Response>>() {}.getType(); try { factory.get(observableType, NO_ANNOTATIONS, retrofit); fail(); } catch (IllegalStateException e) { assertThat(e) .hasMessage("Response must be parameterized as Response<Foo> or Response<? extends Foo>"); } } |
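This factory mirrors the Guava one: CompletableFuture&lt;Response&lt;T&gt;&gt; adapts with response type T, any other CompletableFuture&lt;T&gt; with body type T, and raw types throw. A hedged declaration sketch; the endpoint is made up, and on Java 8+ platforms Retrofit registers this adapter itself:

```java
import java.util.concurrent.CompletableFuture;

import okhttp3.ResponseBody;
import retrofit2.Response;
import retrofit2.http.GET;

// Hypothetical service: both return shapes are handled by the adapter factory above.
interface FutureService {
  @GET("report") CompletableFuture<ResponseBody> body();
  @GET("report") CompletableFuture<Response<ResponseBody>> response();
}
```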
RequestFactory { okhttp3.Request create(Object[] args) throws IOException { @SuppressWarnings("unchecked") ParameterHandler<Object>[] handlers = (ParameterHandler<Object>[]) parameterHandlers; int argumentCount = args.length; if (argumentCount != handlers.length) { throw new IllegalArgumentException( "Argument count (" + argumentCount + ") doesn't match expected count (" + handlers.length + ")"); } RequestBuilder requestBuilder = new RequestBuilder( httpMethod, baseUrl, relativeUrl, headers, contentType, hasBody, isFormEncoded, isMultipart); if (isKotlinSuspendFunction) { argumentCount--; } List<Object> argumentList = new ArrayList<>(argumentCount); for (int p = 0; p < argumentCount; p++) { argumentList.add(args[p]); handlers[p].apply(requestBuilder, args[p]); } return requestBuilder.get().tag(Invocation.class, new Invocation(method, argumentList)).build(); } RequestFactory(Builder builder); } | @Test public void customMethodWithBody() { class Example { @HTTP(method = "CUSTOM2", path = "/foo", hasBody = true) Call<ResponseBody> method(@Body RequestBody body) { return null; } } RequestBody body = RequestBody.create(TEXT_PLAIN, "hi"); Request request = buildRequest(Example.class, body); assertThat(request.method()).isEqualTo("CUSTOM2"); assertThat(request.url().toString()).isEqualTo("http: assertBody(request.body(), "hi"); }
@Test public void post() { class Example { @POST("/foo/bar/") Call<ResponseBody> method(@Body RequestBody body) { return null; } } RequestBody body = RequestBody.create(TEXT_PLAIN, "hi"); Request request = buildRequest(Example.class, body); assertThat(request.method()).isEqualTo("POST"); assertThat(request.headers().size()).isZero(); assertThat(request.url().toString()).isEqualTo("http: assertBody(request.body(), "hi"); }
@Test public void put() { class Example { @PUT("/foo/bar/") Call<ResponseBody> method(@Body RequestBody body) { return null; } } RequestBody body = RequestBody.create(TEXT_PLAIN, "hi"); Request request = buildRequest(Example.class, body); assertThat(request.method()).isEqualTo("PUT"); assertThat(request.headers().size()).isZero(); assertThat(request.url().toString()).isEqualTo("http: assertBody(request.body(), "hi"); }
@Test public void patch() { class Example { @PATCH("/foo/bar/") Call<ResponseBody> method(@Body RequestBody body) { return null; } } RequestBody body = RequestBody.create(TEXT_PLAIN, "hi"); Request request = buildRequest(Example.class, body); assertThat(request.method()).isEqualTo("PATCH"); assertThat(request.headers().size()).isZero(); assertThat(request.url().toString()).isEqualTo("http: assertBody(request.body(), "hi"); }
@Test public void getWithJavaUriUrl() { class Example { @GET Call<ResponseBody> method(@Url URI url) { return null; } } Request request = buildRequest(Example.class, URI.create("foo/bar/")); assertThat(request.method()).isEqualTo("GET"); assertThat(request.headers().size()).isZero(); assertThat(request.url().toString()).isEqualTo("http: assertThat(request.body()).isNull(); }
@Test public void getWithJavaUriUrlAbsolute() { class Example { @GET Call<ResponseBody> method(@Url URI url) { return null; } } Request request = buildRequest(Example.class, URI.create("https: assertThat(request.method()).isEqualTo("GET"); assertThat(request.headers().size()).isZero(); assertThat(request.url().toString()).isEqualTo("https: assertThat(request.body()).isNull(); }
@Test public void postWithUrl() { class Example { @POST Call<ResponseBody> method(@Url String url, @Body RequestBody body) { return null; } } RequestBody body = RequestBody.create(TEXT_PLAIN, "hi"); Request request = buildRequest(Example.class, "http: assertThat(request.method()).isEqualTo("POST"); assertThat(request.headers().size()).isZero(); assertThat(request.url().toString()).isEqualTo("http: assertBody(request.body(), "hi"); }
@Test public void normalPostWithPathParam() { class Example { @POST("/foo/bar/{ping}/") Call<ResponseBody> method(@Path("ping") String ping, @Body RequestBody body) { return null; } } RequestBody body = RequestBody.create(TEXT_PLAIN, "Hi!"); Request request = buildRequest(Example.class, "pong", body); assertThat(request.method()).isEqualTo("POST"); assertThat(request.headers().size()).isZero(); assertThat(request.url().toString()).isEqualTo("http: assertBody(request.body(), "Hi!"); }
@Test public void bodyWithPathParams() { class Example { @POST("/foo/bar/{ping}/{kit}/") Call<ResponseBody> method( @Path("ping") String ping, @Body RequestBody body, @Path("kit") String kit) { return null; } } RequestBody body = RequestBody.create(TEXT_PLAIN, "Hi!"); Request request = buildRequest(Example.class, "pong", body, "kat"); assertThat(request.method()).isEqualTo("POST"); assertThat(request.headers().size()).isZero(); assertThat(request.url().toString()).isEqualTo("http: assertBody(request.body(), "Hi!"); }
@Test public void simpleMultipart() throws IOException { class Example { @Multipart @POST("/foo/bar/") Call<ResponseBody> method(@Part("ping") String ping, @Part("kit") RequestBody kit) { return null; } } Request request = buildRequest(Example.class, "pong", RequestBody.create(TEXT_PLAIN, "kat")); assertThat(request.method()).isEqualTo("POST"); assertThat(request.headers().size()).isZero(); assertThat(request.url().toString()).isEqualTo("http: RequestBody body = request.body(); assertThat(body.contentType().toString()).startsWith("multipart/form-data; boundary="); Buffer buffer = new Buffer(); body.writeTo(buffer); String bodyString = buffer.readUtf8(); assertThat(bodyString) .contains("Content-Disposition: form-data;") .contains("name=\"ping\"\r\n") .contains("\r\npong\r\n--"); assertThat(bodyString) .contains("Content-Disposition: form-data;") .contains("name=\"kit\"") .contains("\r\nkat\r\n--"); }
@Test public void multipartOkHttpPartWithFilename() throws IOException { class Example { @Multipart @POST("/foo/bar/") Call<ResponseBody> method(@Part MultipartBody.Part part) { return null; } } MultipartBody.Part part = MultipartBody.Part.createFormData("kit", "kit.txt", RequestBody.create(null, "kat")); Request request = buildRequest(Example.class, part); assertThat(request.method()).isEqualTo("POST"); assertThat(request.headers().size()).isZero(); assertThat(request.url().toString()).isEqualTo("http: RequestBody body = request.body(); Buffer buffer = new Buffer(); body.writeTo(buffer); String bodyString = buffer.readUtf8(); assertThat(bodyString) .contains("Content-Disposition: form-data;") .contains("name=\"kit\"; filename=\"kit.txt\"\r\n") .contains("\r\nkat\r\n--"); }
@Test public void multipartWithEncoding() throws IOException { class Example { @Multipart @POST("/foo/bar/") Call<ResponseBody> method( @Part(value = "ping", encoding = "8-bit") String ping, @Part(value = "kit", encoding = "7-bit") RequestBody kit) { return null; } } Request request = buildRequest(Example.class, "pong", RequestBody.create(TEXT_PLAIN, "kat")); assertThat(request.method()).isEqualTo("POST"); assertThat(request.headers().size()).isZero(); assertThat(request.url().toString()).isEqualTo("http: RequestBody body = request.body(); Buffer buffer = new Buffer(); body.writeTo(buffer); String bodyString = buffer.readUtf8(); assertThat(bodyString) .contains("Content-Disposition: form-data;") .contains("name=\"ping\"\r\n") .contains("Content-Transfer-Encoding: 8-bit") .contains("\r\npong\r\n--"); assertThat(bodyString) .contains("Content-Disposition: form-data;") .contains("name=\"kit\"") .contains("Content-Transfer-Encoding: 7-bit") .contains("\r\nkat\r\n--"); }
@Test public void multipartPartMap() throws IOException { class Example { @Multipart @POST("/foo/bar/") Call<ResponseBody> method(@PartMap Map<String, RequestBody> parts) { return null; } } Map<String, RequestBody> params = new LinkedHashMap<>(); params.put("ping", RequestBody.create(null, "pong")); params.put("kit", RequestBody.create(null, "kat")); Request request = buildRequest(Example.class, params); assertThat(request.method()).isEqualTo("POST"); assertThat(request.headers().size()).isZero(); assertThat(request.url().toString()).isEqualTo("http: RequestBody body = request.body(); Buffer buffer = new Buffer(); body.writeTo(buffer); String bodyString = buffer.readUtf8(); assertThat(bodyString) .contains("Content-Disposition: form-data;") .contains("name=\"ping\"\r\n") .contains("\r\npong\r\n--"); assertThat(bodyString) .contains("Content-Disposition: form-data;") .contains("name=\"kit\"") .contains("\r\nkat\r\n--"); }
@Test public void multipartPartMapWithEncoding() throws IOException { class Example { @Multipart @POST("/foo/bar/") Call<ResponseBody> method(@PartMap(encoding = "8-bit") Map<String, RequestBody> parts) { return null; } } Map<String, RequestBody> params = new LinkedHashMap<>(); params.put("ping", RequestBody.create(null, "pong")); params.put("kit", RequestBody.create(null, "kat")); Request request = buildRequest(Example.class, params); assertThat(request.method()).isEqualTo("POST"); assertThat(request.headers().size()).isZero(); assertThat(request.url().toString()).isEqualTo("http: RequestBody body = request.body(); Buffer buffer = new Buffer(); body.writeTo(buffer); String bodyString = buffer.readUtf8(); assertThat(bodyString) .contains("Content-Disposition: form-data;") .contains("name=\"ping\"\r\n") .contains("Content-Transfer-Encoding: 8-bit") .contains("\r\npong\r\n--"); assertThat(bodyString) .contains("Content-Disposition: form-data;") .contains("name=\"kit\"") .contains("Content-Transfer-Encoding: 8-bit") .contains("\r\nkat\r\n--"); }
@Test public void multipartPartMapRejectsNullKeys() { class Example { @Multipart @POST("/foo/bar/") Call<ResponseBody> method(@PartMap Map<String, RequestBody> parts) { return null; } } Map<String, RequestBody> params = new LinkedHashMap<>(); params.put("ping", RequestBody.create(null, "pong")); params.put(null, RequestBody.create(null, "kat")); try { buildRequest(Example.class, params); fail(); } catch (IllegalArgumentException e) { assertThat(e) .hasMessage( "Part map contained null key. (parameter #1)\n" + " for method Example.method"); } }
@Test public void multipartPartMapRejectsNullValues() { class Example { @Multipart @POST("/foo/bar/") Call<ResponseBody> method(@PartMap Map<String, RequestBody> parts) { return null; } } Map<String, RequestBody> params = new LinkedHashMap<>(); params.put("ping", RequestBody.create(null, "pong")); params.put("kit", null); try { buildRequest(Example.class, params); fail(); } catch (IllegalArgumentException e) { assertThat(e) .hasMessage( "Part map contained null value for key 'kit'. (parameter #1)\n" + " for method Example.method"); } }
@Test public void contentTypeAnnotationHeaderOverrides() { class Example { @POST("/") @Headers("Content-Type: text/not-plain") Call<ResponseBody> method(@Body RequestBody body) { return null; } } RequestBody body = RequestBody.create(TEXT_PLAIN, "hi"); Request request = buildRequest(Example.class, body); assertThat(request.body().contentType().toString()).isEqualTo("text/not-plain"); }
@Test public void contentTypeAnnotationHeaderOverridesMultipart() { class Example { @Multipart @POST("/foo/bar/") @Headers("Content-Type: text/not-plain") Call<ResponseBody> method(@Part("ping") String ping, @Part("kit") RequestBody kit) { return null; } } Request request = buildRequest(Example.class, "pong", RequestBody.create(TEXT_PLAIN, "kat")); RequestBody body = request.body(); assertThat(request.body().contentType().toString()).isEqualTo("text/not-plain"); }
@Test public void malformedContentTypeHeaderThrows() { class Example { @POST("/") @Headers("Content-Type: hello, world!") Call<ResponseBody> method(@Body RequestBody body) { return null; } } RequestBody body = RequestBody.create(TEXT_PLAIN, "hi"); try { buildRequest(Example.class, body); fail(); } catch (IllegalArgumentException e) { assertThat(e) .hasMessage("Malformed content type: hello, world!\n" + " for method Example.method"); assertThat(e.getCause()).isInstanceOf(IllegalArgumentException.class); } }
@Test public void contentTypeParameterHeaderOverrides() { class Example { @POST("/") Call<ResponseBody> method( @Header("Content-Type") String contentType, @Body RequestBody body) { return null; } } RequestBody body = RequestBody.create(TEXT_PLAIN, "Plain"); Request request = buildRequest(Example.class, "text/not-plain", body); assertThat(request.body().contentType().toString()).isEqualTo("text/not-plain"); }
@Test public void malformedContentTypeParameterThrows() { class Example { @POST("/") Call<ResponseBody> method( @Header("Content-Type") String contentType, @Body RequestBody body) { return null; } } RequestBody body = RequestBody.create(TEXT_PLAIN, "hi"); try { buildRequest(Example.class, "hello, world!", body); fail(); } catch (IllegalArgumentException e) { assertThat(e).hasMessage("Malformed content type: hello, world!"); assertThat(e.getCause()).isInstanceOf(IllegalArgumentException.class); } } |
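RequestFactory.create(args) applies one ParameterHandler per argument, drops the trailing continuation argument for Kotlin suspend functions, and tags the built request with an Invocation recording the method and argument list. The request-building tests above correspond to service declarations like the following hedged sketch (paths and names are illustrative, and a converter able to serialize String parts is assumed, as in the tests):

```java
import okhttp3.MultipartBody;
import okhttp3.RequestBody;
import okhttp3.ResponseBody;
import retrofit2.Call;
import retrofit2.http.Body;
import retrofit2.http.Multipart;
import retrofit2.http.POST;
import retrofit2.http.Part;
import retrofit2.http.Path;
import retrofit2.http.Url;

// Illustrative declarations mirroring the request shapes exercised above.
interface RequestShapes {
  // Path substitution plus a raw body: POST /foo/bar/{ping}/
  @POST("/foo/bar/{ping}/")
  Call<ResponseBody> postWithPath(@Path("ping") String ping, @Body RequestBody body);

  // Caller-supplied URL combined with a body.
  @POST
  Call<ResponseBody> postToUrl(@Url String url, @Body RequestBody body);

  // Multipart upload with a named part and a prebuilt OkHttp part.
  @Multipart
  @POST("/foo/bar/")
  Call<ResponseBody> upload(@Part("ping") String ping, @Part MultipartBody.Part file);
}
```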
NetworkBehavior { public Throwable failureException() { return failureException; } private NetworkBehavior(Random random); static NetworkBehavior create(); @SuppressWarnings("ConstantConditions") // Guarding API nullability. static NetworkBehavior create(Random random); void setDelay(long amount, TimeUnit unit); long delay(TimeUnit unit); void setVariancePercent(int variancePercent); int variancePercent(); void setFailurePercent(int failurePercent); int failurePercent(); @SuppressWarnings("ConstantConditions") // Guarding API nullability. void setFailureException(Throwable exception); Throwable failureException(); int errorPercent(); void setErrorPercent(int errorPercent); @SuppressWarnings("ConstantConditions") // Guarding API nullability. void setErrorFactory(Callable<Response<?>> errorFactory); Response<?> createErrorResponse(); boolean calculateIsFailure(); boolean calculateIsError(); long calculateDelay(TimeUnit unit); } | @Test public void defaultThrowable() { Throwable t = behavior.failureException(); assertThat(t) .isInstanceOf(IOException.class) .isExactlyInstanceOf(MockRetrofitIOException.class); assertThat(t.getStackTrace()).isEmpty(); } |
NetworkBehavior { public void setDelay(long amount, TimeUnit unit) { if (amount < 0) { throw new IllegalArgumentException("Amount must be positive value."); } this.delayMs = unit.toMillis(amount); } private NetworkBehavior(Random random); static NetworkBehavior create(); @SuppressWarnings("ConstantConditions") // Guarding API nullability. static NetworkBehavior create(Random random); void setDelay(long amount, TimeUnit unit); long delay(TimeUnit unit); void setVariancePercent(int variancePercent); int variancePercent(); void setFailurePercent(int failurePercent); int failurePercent(); @SuppressWarnings("ConstantConditions") // Guarding API nullability. void setFailureException(Throwable exception); Throwable failureException(); int errorPercent(); void setErrorPercent(int errorPercent); @SuppressWarnings("ConstantConditions") // Guarding API nullability. void setErrorFactory(Callable<Response<?>> errorFactory); Response<?> createErrorResponse(); boolean calculateIsFailure(); boolean calculateIsError(); long calculateDelay(TimeUnit unit); } | @Test public void delayMustBePositive() { try { behavior.setDelay(-1, SECONDS); fail(); } catch (IllegalArgumentException e) { assertThat(e).hasMessage("Amount must be positive value."); } } |
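The two NetworkBehavior rows show the default failure (a MockRetrofitIOException with an empty stack trace) and the input validation on setDelay. In practice the behavior is plugged into retrofit-mock's MockRetrofit so fake services exhibit configurable latency, variance and failures; a hedged wiring sketch with a placeholder service interface:

```java
import java.util.concurrent.TimeUnit;

import retrofit2.Retrofit;
import retrofit2.mock.BehaviorDelegate;
import retrofit2.mock.MockRetrofit;
import retrofit2.mock.NetworkBehavior;

public final class MockNetworkSetup {
  // Placeholder service interface, used only for illustration.
  interface Service {}

  public static BehaviorDelegate<Service> fakeService(Retrofit retrofit) {
    NetworkBehavior behavior = NetworkBehavior.create();
    behavior.setDelay(500, TimeUnit.MILLISECONDS); // negative amounts are rejected
    behavior.setVariancePercent(40);
    behavior.setFailurePercent(3);

    MockRetrofit mockRetrofit = new MockRetrofit.Builder(retrofit)
        .networkBehavior(behavior)
        .build();
    return mockRetrofit.create(Service.class);
  }
}
```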
UriTemplateParser { public static void parse(String template, Handler handler) { assert template != null; assert handler != null; int pos = 0; final int length = template.length(); State state = State.OutsideParam; StringBuilder builder = new StringBuilder(); while (pos < length) { char c = template.charAt(pos++); switch (state) { case InsideParam: { if (c == '}') { if (builder.length() > 0) { handler.handleParam(builder.toString()); builder.setLength(0); } state = State.OutsideParam; } else { builder.append(c); } break; } case OutsideParam: { if (c == '{') { if (builder.length() > 0) { handler.handleText(builder.toString()); builder.setLength(0); } state = State.InsideParam; } else { builder.append(c); } break; } } } if (builder.length() > 0) { switch (state) { case InsideParam: handler.handleParam(builder.toString()); break; case OutsideParam: handler.handleText(builder.toString()); break; } } } static void parse(String template, Handler handler); } | @Test public void shouldParseParameterFirst() { UriTemplateParser.parse("{a}/bc", handler); verify(handler).handleParam("a"); verify(handler).handleText("/bc"); verifyNoMoreInteractions(handler); }
@Test public void shouldParseParameterLast() { UriTemplateParser.parse("bc/{a}", handler); verify(handler).handleText("bc/"); verify(handler).handleParam("a"); verifyNoMoreInteractions(handler); }
@Test public void shouldParseMultipleParameters() { UriTemplateParser.parse("bc/{a}{b}", handler); verify(handler).handleText("bc/"); verify(handler).handleParam("a"); verify(handler).handleParam("b"); verifyNoMoreInteractions(handler); }
@Test public void shouldParseEmpty() { UriTemplateParser.parse("", handler); verifyNoMoreInteractions(handler); } |
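The parser is a two-state scanner: text outside braces is reported through handleText, the contents of each {...} through handleParam, and whatever remains in the buffer is flushed at end of input, which is exactly the callback order the tests verify. To make the state machine concrete, here is a small self-contained re-implementation of the same scan (not the library class itself):

```java
import java.util.ArrayList;
import java.util.List;

public final class TemplateScan {
  // Re-implementation of UriTemplateParser's two-state scan, for illustration only.
  static List<String> scan(String template) {
    List<String> events = new ArrayList<>();
    StringBuilder buf = new StringBuilder();
    boolean insideParam = false;
    for (char c : template.toCharArray()) {
      if (insideParam && c == '}') {
        if (buf.length() > 0) events.add("param:" + buf);
        buf.setLength(0);
        insideParam = false;
      } else if (!insideParam && c == '{') {
        if (buf.length() > 0) events.add("text:" + buf);
        buf.setLength(0);
        insideParam = true;
      } else {
        buf.append(c);
      }
    }
    if (buf.length() > 0) events.add((insideParam ? "param:" : "text:") + buf);
    return events;
  }

  public static void main(String[] args) {
    System.out.println(scan("bc/{a}{b}")); // [text:bc/, param:a, param:b]
  }
}
```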
WadlXsltUtils { public static String hypernizeURI(String uri) { String result = uri.replaceAll("/", "/​"); return result; } static InputStream getUpgradeTransformAsStream(); static InputStream getWadlSummaryTransform(); static String hypernizeURI(String uri); } | @Test public void hypernizUriTest() { String uri = "http: String result = WadlXsltUtils.hypernizeURI(uri); assertThat(5, equalTo(result.split("&").length)); } |
WadlAstBuilder { public ApplicationNode buildAst(URI rootFile) throws InvalidWADLException, IOException { try { Application a = processDescription(rootFile); return buildAst(a,rootFile); } catch (JAXBException ex) { throw new RuntimeException("Internal error",ex); } } WadlAstBuilder(
SchemaCallback schemaCallback,
MessageListener messageListener); Map<String, ResourceTypeNode> getInterfaceMap(); ApplicationNode buildAst(URI rootFile); static InvalidWADLException messageStringFromObject(String message, Object obj); } | @Test public void testSoapUIYahooSearch() throws InvalidWADLException, IOException, URISyntaxException { WadlAstBuilder builder = new WadlAstBuilder( new WadlAstBuilder.SchemaCallback() { public void processSchema(InputSource is) { } public void processSchema(String uri, Element node) { } }, new MessageListener() { public void warning(String message, Throwable throwable) { } public void info(String message) { } public void error(String message, Throwable throwable) { } }); ApplicationNode an = builder.buildAst(WadlAstBuilderTest.class.getResource("SoapUIYahooSearch.wadl").toURI()); List<MethodNode> methods = an.getResources().get(0).getChildResources().get(0).getMethods(); assertThat("Only one method", methods.size(), equalTo(1)); List<RepresentationNode> supportedOutputs = new ArrayList<RepresentationNode>(); for (List<RepresentationNode> nodeList : methods.get(0).getSupportedOutputs().values()) { for (RepresentationNode node : nodeList) { supportedOutputs.add(node); } } assertThat("Only one output", supportedOutputs.size(), equalTo(1)); assertThat("Only one fault", methods.get(0).getFaults().size(), equalTo(1)); } |
JModule { public String name() { return name; } JModule(final String name); String name(); void _exports(final JPackage pkg); void _exports(final Collection<JPackage> pkgs, final boolean addEmpty); void _requires(final String name, final boolean isPublic, final boolean isStatic); void _requires(final String name); void _requires(final boolean isPublic, final boolean isStatic, final String ...names); void _requires(final String ...names); JFormatter generate(final JFormatter f); } | @Test public void testName() { final JModule instance = new JModule(MODULE_NAME); assertEquals(MODULE_NAME, instance.name()); } |
JModule { public void _exports(final JPackage pkg) { directives.add(new JExportsDirective(pkg.name())); } JModule(final String name); String name(); void _exports(final JPackage pkg); void _exports(final Collection<JPackage> pkgs, final boolean addEmpty); void _requires(final String name, final boolean isPublic, final boolean isStatic); void _requires(final String name); void _requires(final boolean isPublic, final boolean isStatic, final String ...names); void _requires(final String ...names); JFormatter generate(final JFormatter f); } | @Test public void test_exports() { final JModule instance = new JModule(MODULE_NAME); final JCodeModel cm = new JCodeModel(); final JPackage pkg = new JPackage(PKG_NAME, cm); instance._exports(pkg); JModuleDirective directive = directivesSingleElementCheck(instance); assertTrue(directive instanceof JExportsDirective); assertEquals(PKG_NAME, directive.name); } |
JModule { public void _requires(final String name, final boolean isPublic, final boolean isStatic) { directives.add(new JRequiresDirective(name, isPublic, isStatic)); } JModule(final String name); String name(); void _exports(final JPackage pkg); void _exports(final Collection<JPackage> pkgs, final boolean addEmpty); void _requires(final String name, final boolean isPublic, final boolean isStatic); void _requires(final String name); void _requires(final boolean isPublic, final boolean isStatic, final String ...names); void _requires(final String ...names); JFormatter generate(final JFormatter f); } | @Test public void test_requires() { final JModule instance = new JModule(MODULE_NAME); instance._requires(DEP_MODULE_NAME); JModuleDirective directive = directivesSingleElementCheck(instance); assertTrue(directive instanceof JRequiresDirective); assertEquals(DEP_MODULE_NAME, directive.name); } |
JModule { public JFormatter generate(final JFormatter f) { f.p("module").p(name); f.p('{').nl(); if (!directives.isEmpty()) { f.i(); for (final JModuleDirective directive : directives) { directive.generate(f); } f.o(); } f.p('}').nl(); return f; } JModule(final String name); String name(); void _exports(final JPackage pkg); void _exports(final Collection<JPackage> pkgs, final boolean addEmpty); void _requires(final String name, final boolean isPublic, final boolean isStatic); void _requires(final String name); void _requires(final boolean isPublic, final boolean isStatic, final String ...names); void _requires(final String ...names); JFormatter generate(final JFormatter f); } | @Test public void testGenerate() { final JModule instance = new JModule(MODULE_NAME); instance.generate(jf); final String output = normalizeWhiteSpaces(out.toString()); verifyModuleEnvelope(output, instance); } |
ApNavigator implements Navigator<TypeMirror, TypeElement, VariableElement, ExecutableElement> { public VariableElement[] getEnumConstants(TypeElement clazz) { List<? extends Element> elements = env.getElementUtils().getAllMembers(clazz); Collection<VariableElement> constants = new ArrayList<VariableElement>(); for (Element element : elements) { if (element.getKind().equals(ElementKind.ENUM_CONSTANT)) { constants.add((VariableElement) element); } } return constants.toArray(new VariableElement[constants.size()]); } ApNavigator(ProcessingEnvironment env); TypeElement getSuperClass(TypeElement typeElement); TypeMirror getBaseClass(TypeMirror type, TypeElement sup); String getClassName(TypeElement t); String getTypeName(TypeMirror typeMirror); String getClassShortName(TypeElement t); Collection<VariableElement> getDeclaredFields(TypeElement typeElement); VariableElement getDeclaredField(TypeElement clazz, String fieldName); Collection<ExecutableElement> getDeclaredMethods(TypeElement typeElement); TypeElement getDeclaringClassForField(VariableElement f); TypeElement getDeclaringClassForMethod(ExecutableElement m); TypeMirror getFieldType(VariableElement f); String getFieldName(VariableElement f); String getMethodName(ExecutableElement m); TypeMirror getReturnType(ExecutableElement m); TypeMirror[] getMethodParameters(ExecutableElement m); boolean isStaticMethod(ExecutableElement m); boolean isFinalMethod(ExecutableElement m); boolean isSubClassOf(TypeMirror sub, TypeMirror sup); TypeMirror ref(Class c); TypeMirror use(TypeElement t); TypeElement asDecl(TypeMirror m); TypeElement asDecl(Class c); TypeMirror erasure(TypeMirror t); boolean isAbstract(TypeElement clazz); boolean isFinal(TypeElement clazz); VariableElement[] getEnumConstants(TypeElement clazz); TypeMirror getVoidType(); String getPackageName(TypeElement clazz); @Override TypeElement loadObjectFactory(TypeElement referencePoint, String packageName); boolean isBridgeMethod(ExecutableElement method); boolean isOverriding(ExecutableElement method, TypeElement base); boolean isInterface(TypeElement clazz); boolean isTransient(VariableElement f); boolean isInnerClass(TypeElement clazz); @Override boolean isSameType(TypeMirror t1, TypeMirror t2); boolean isArray(TypeMirror type); boolean isArrayButNotByteArray(TypeMirror t); TypeMirror getComponentType(TypeMirror t); TypeMirror getTypeArgument(TypeMirror typeMirror, int i); boolean isParameterizedType(TypeMirror typeMirror); boolean isPrimitive(TypeMirror t); TypeMirror getPrimitive(Class primitiveType); Location getClassLocation(TypeElement typeElement); Location getFieldLocation(VariableElement variableElement); Location getMethodLocation(ExecutableElement executableElement); boolean hasDefaultConstructor(TypeElement t); boolean isStaticField(VariableElement f); boolean isPublicMethod(ExecutableElement m); boolean isPublicField(VariableElement f); boolean isEnum(TypeElement t); } | @Test public void testgetEnumConstantsOrder( @Mocked final ProcessingEnvironment env, @Mocked final TypeElement clazz, @Mocked final VariableElement enumElement1, @Mocked final VariableElement enumElement2, @Mocked final VariableElement enumElement3, @Mocked final VariableElement enumElement4 ) throws Exception { new Expectations() { { env.getTypeUtils().getPrimitiveType(TypeKind.BYTE); result = (PrimitiveType) null; enumElement1.getKind(); result = ElementKind.ENUM_CONSTANT; enumElement2.getKind(); result = ElementKind.ENUM_CONSTANT; enumElement3.getKind(); result = ElementKind.ENUM_CONSTANT;
enumElement4.getKind(); result = ElementKind.ENUM_CONSTANT; enumElement1.hashCode(); result = 4; enumElement2.hashCode(); result = 3; enumElement3.hashCode(); result = 2; enumElement4.hashCode(); result = 1; env.getElementUtils().getAllMembers(clazz); result = Arrays.asList(enumElement1, enumElement2, enumElement3, enumElement4); } }; ApNavigator apn = new ApNavigator(env); VariableElement[] resArr = apn.getEnumConstants(clazz); assertTrue("Position of first element is changed", resArr[0] == enumElement1); assertTrue("Position of second element is changed", resArr[1] == enumElement2); assertTrue("Position of third element is changed", resArr[2] == enumElement3); assertTrue("Position of fourth element is changed", resArr[3] == enumElement4); } |
SchemaGenerator extends AbstractProcessor { private void filterClass(List<Reference> result, Collection<? extends Element> elements) { for (Element element : elements) { final ElementKind kind = element.getKind(); if (ElementKind.CLASS.equals(kind) || ElementKind.ENUM.equals(kind)) { result.add(new Reference((TypeElement) element, processingEnv)); filterClass(result, ElementFilter.typesIn(element.getEnclosedElements())); } } } SchemaGenerator(); SchemaGenerator( Map<String,File> m ); void setEpisodeFile(File episodeFile); @Override boolean process(Set<? extends TypeElement> annotations, RoundEnvironment roundEnv); @Override SourceVersion getSupportedSourceVersion(); } | @Test public void filterClassTest( @Mocked Reference ref, @Mocked final TypeElement interfaceElement, @Mocked final TypeElement enumElement, @Mocked final TypeElement classElement, @Mocked final TypeElement nestedClassElement, @Mocked final TypeElement nestedEnumElement) throws Exception { new Expectations() {{ interfaceElement.getKind(); result = ElementKind.INTERFACE; enumElement.getKind(); result = ElementKind.ENUM; nestedEnumElement.getKind(); result = ElementKind.ENUM; classElement.getKind(); result = ElementKind.CLASS; nestedClassElement.getKind(); result = ElementKind.CLASS; enumElement.getEnclosedElements(); result = Arrays.asList(interfaceElement, nestedClassElement); classElement.getEnclosedElements(); result = Arrays.asList(nestedEnumElement, interfaceElement); }}; List<Reference> result = new ArrayList<Reference>(); SchemaGenerator sg = new SchemaGenerator(); Collection<TypeElement> elements = Collections.singletonList(interfaceElement); invoke(sg, "filterClass", result, elements); assertTrue("Expected no root types to be found. But found: " + result.size(), result.isEmpty()); elements = Arrays.asList(interfaceElement, enumElement, classElement); invoke(sg, "filterClass", result, elements); assertTrue("Expected 4 root types to be found. But found: " + result.size(), result.size() == 4); new Verifications() {{ interfaceElement.getEnclosedElements(); maxTimes = 0; nestedClassElement.getEnclosedElements(); maxTimes = 1; nestedEnumElement.getEnclosedElements(); maxTimes = 1; }}; } |
SchemaGenerator { private static String setClasspath(String givenClasspath) { StringBuilder cp = new StringBuilder(); appendPath(cp, givenClasspath); ClassLoader cl = Thread.currentThread().getContextClassLoader(); while (cl != null) { if (cl instanceof URLClassLoader) { for (URL url : ((URLClassLoader) cl).getURLs()) { try { appendPath(cp,new File(url.toURI()).getPath()); } catch(URISyntaxException ex) { LOGGER.log(Level.SEVERE, ex.getMessage(), ex); } } } cl = cl.getParent(); } appendPath(cp, findJaxbApiJar()); return cp.toString(); } static void main(String[] args); static int run(String[] args); static int run(String[] args, ClassLoader classLoader); } | @Test public void setClassPathTest() throws Exception { final URL cUrl = new MockUp<URL>() { String path = "C:"; @Mock public String getPath() { return "/" + path; } @Mock public URI toURI() { return new File(path).toURI(); } }.getMockInstance(); new MockUp<URLClassLoader>() { @Mock URL[] getURLs() { URL[] urls = { cUrl }; return urls; } }; new Expectations(SchemaGenerator.class) {{ invoke(SchemaGenerator.class, "findJaxbApiJar"); result = ""; }}; String result = invoke(SchemaGenerator.class, "setClasspath", ""); String sepChar = File.pathSeparator; assertFalse("Result classpath contains incorrect drive path", result.contains(sepChar+"/C:")); } |
PluginImpl extends Plugin { public boolean run(@NotNull Outline model, Options opt, ErrorHandler errorHandler) { checkAndInject(model.getClasses()); checkAndInject(model.getEnums()); return true; } String getOptionName(); List<String> getCustomizationURIs(); boolean isCustomizationTagName(String nsUri, String localName); String getUsage(); boolean run(@NotNull Outline model, Options opt, ErrorHandler errorHandler); } | @Test public void pluginRunTest(final @Mocked Outline model, @Mocked Options opt, @Mocked ErrorHandler errorHandler) { new Expectations() {{ Collection<? extends CustomizableOutline> target = Collections.emptyList(); model.getClasses(); result = target; Deencapsulation.invoke(PluginImpl.class, "checkAndInject", target); model.getEnums(); result = target; Deencapsulation.invoke(PluginImpl.class, "checkAndInject", target); }}; new PluginImpl().run(model, opt, errorHandler); } |
AccessController extends BaseMasterAndRegionObserver implements RegionServerObserver,
AccessControlService.Interface, CoprocessorService, EndpointObserver, BulkLoadObserver { @Override public boolean preBalanceSwitch(ObserverContext<MasterCoprocessorEnvironment> c, boolean newValue) throws IOException { requirePermission("balanceSwitch", Action.ADMIN); return newValue; } static boolean isAuthorizationSupported(Configuration conf); static boolean isCellAuthorizationSupported(Configuration conf); Region getRegion(); TableAuthManager getAuthManager(); void requireNamespacePermission(String request, String namespace,
Action... permissions); void requireNamespacePermission(String request, String namespace, TableName tableName,
Map<byte[], ? extends Collection<byte[]>> familyMap, Action... permissions); @Override void start(CoprocessorEnvironment env); @Override void stop(CoprocessorEnvironment env); @Override void preCreateTable(ObserverContext<MasterCoprocessorEnvironment> c,
HTableDescriptor desc, HRegionInfo[] regions); @Override void postCreateTableHandler(final ObserverContext<MasterCoprocessorEnvironment> c,
HTableDescriptor desc, HRegionInfo[] regions); @Override void preDeleteTable(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName); @Override void postDeleteTable(ObserverContext<MasterCoprocessorEnvironment> c,
final TableName tableName); @Override void preTruncateTable(ObserverContext<MasterCoprocessorEnvironment> c,
final TableName tableName); @Override void postTruncateTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
final TableName tableName); @Override void preModifyTable(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName,
HTableDescriptor htd); @Override void postModifyTable(ObserverContext<MasterCoprocessorEnvironment> c,
TableName tableName, final HTableDescriptor htd); @Override void preAddColumn(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName,
HColumnDescriptor column); @Override void preModifyColumn(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName,
HColumnDescriptor descriptor); @Override void preDeleteColumn(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName,
byte[] col); @Override void postDeleteColumn(ObserverContext<MasterCoprocessorEnvironment> c,
final TableName tableName, final byte[] col); @Override void preEnableTable(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName); @Override void preDisableTable(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName); @Override void preAbortProcedure(
ObserverContext<MasterCoprocessorEnvironment> ctx,
final ProcedureExecutor<MasterProcedureEnv> procEnv,
final long procId); @Override void postAbortProcedure(ObserverContext<MasterCoprocessorEnvironment> ctx); @Override void preListProcedures(ObserverContext<MasterCoprocessorEnvironment> ctx); @Override void postListProcedures(
ObserverContext<MasterCoprocessorEnvironment> ctx,
List<ProcedureInfo> procInfoList); @Override void preMove(ObserverContext<MasterCoprocessorEnvironment> c, HRegionInfo region,
ServerName srcServer, ServerName destServer); @Override void preAssign(ObserverContext<MasterCoprocessorEnvironment> c, HRegionInfo regionInfo); @Override void preUnassign(ObserverContext<MasterCoprocessorEnvironment> c, HRegionInfo regionInfo,
boolean force); @Override void preRegionOffline(ObserverContext<MasterCoprocessorEnvironment> c,
HRegionInfo regionInfo); @Override void preBalance(ObserverContext<MasterCoprocessorEnvironment> c); @Override boolean preBalanceSwitch(ObserverContext<MasterCoprocessorEnvironment> c,
boolean newValue); @Override void preShutdown(ObserverContext<MasterCoprocessorEnvironment> c); @Override void preStopMaster(ObserverContext<MasterCoprocessorEnvironment> c); @Override void postStartMaster(ObserverContext<MasterCoprocessorEnvironment> ctx); @Override void preSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final SnapshotDescription snapshot, final HTableDescriptor hTableDescriptor); @Override void preListSnapshot(ObserverContext<MasterCoprocessorEnvironment> ctx,
final SnapshotDescription snapshot); @Override void preCloneSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final SnapshotDescription snapshot, final HTableDescriptor hTableDescriptor); @Override void preRestoreSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final SnapshotDescription snapshot, final HTableDescriptor hTableDescriptor); @Override void preDeleteSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final SnapshotDescription snapshot); @Override void preCreateNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx,
NamespaceDescriptor ns); @Override void preDeleteNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx, String namespace); @Override void postDeleteNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx,
final String namespace); @Override void preModifyNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx,
NamespaceDescriptor ns); @Override void preGetNamespaceDescriptor(ObserverContext<MasterCoprocessorEnvironment> ctx, String namespace); @Override void postListNamespaceDescriptors(ObserverContext<MasterCoprocessorEnvironment> ctx,
List<NamespaceDescriptor> descriptors); @Override void preTableFlush(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final TableName tableName); @Override void preOpen(ObserverContext<RegionCoprocessorEnvironment> e); @Override void postOpen(ObserverContext<RegionCoprocessorEnvironment> c); @Override void postLogReplay(ObserverContext<RegionCoprocessorEnvironment> c); @Override void preFlush(ObserverContext<RegionCoprocessorEnvironment> e); @Override void preSplit(ObserverContext<RegionCoprocessorEnvironment> e); @Override void preSplit(ObserverContext<RegionCoprocessorEnvironment> e,
byte[] splitRow); @Override InternalScanner preCompact(ObserverContext<RegionCoprocessorEnvironment> e,
final Store store, final InternalScanner scanner, final ScanType scanType); @Override void preGetClosestRowBefore(final ObserverContext<RegionCoprocessorEnvironment> c,
final byte [] row, final byte [] family, final Result result); @Override void preGetOp(final ObserverContext<RegionCoprocessorEnvironment> c,
final Get get, final List<Cell> result); @Override boolean preExists(final ObserverContext<RegionCoprocessorEnvironment> c,
final Get get, final boolean exists); @Override void prePut(final ObserverContext<RegionCoprocessorEnvironment> c,
final Put put, final WALEdit edit, final Durability durability); @Override void postPut(final ObserverContext<RegionCoprocessorEnvironment> c,
final Put put, final WALEdit edit, final Durability durability); @Override void preDelete(final ObserverContext<RegionCoprocessorEnvironment> c,
final Delete delete, final WALEdit edit, final Durability durability); @Override void preBatchMutate(ObserverContext<RegionCoprocessorEnvironment> c,
MiniBatchOperationInProgress<Mutation> miniBatchOp); @Override void postDelete(final ObserverContext<RegionCoprocessorEnvironment> c,
final Delete delete, final WALEdit edit, final Durability durability); @Override boolean preCheckAndPut(final ObserverContext<RegionCoprocessorEnvironment> c,
final byte [] row, final byte [] family, final byte [] qualifier,
final CompareFilter.CompareOp compareOp,
final ByteArrayComparable comparator, final Put put,
final boolean result); @Override boolean preCheckAndPutAfterRowLock(final ObserverContext<RegionCoprocessorEnvironment> c,
final byte[] row, final byte[] family, final byte[] qualifier,
final CompareFilter.CompareOp compareOp, final ByteArrayComparable comparator, final Put put,
final boolean result); @Override boolean preCheckAndDelete(final ObserverContext<RegionCoprocessorEnvironment> c,
final byte [] row, final byte [] family, final byte [] qualifier,
final CompareFilter.CompareOp compareOp,
final ByteArrayComparable comparator, final Delete delete,
final boolean result); @Override boolean preCheckAndDeleteAfterRowLock(
final ObserverContext<RegionCoprocessorEnvironment> c, final byte[] row, final byte[] family,
final byte[] qualifier, final CompareFilter.CompareOp compareOp,
final ByteArrayComparable comparator, final Delete delete, final boolean result); @Override long preIncrementColumnValue(final ObserverContext<RegionCoprocessorEnvironment> c,
final byte [] row, final byte [] family, final byte [] qualifier,
final long amount, final boolean writeToWAL); @Override Result preAppend(ObserverContext<RegionCoprocessorEnvironment> c, Append append); @Override Result preAppendAfterRowLock(final ObserverContext<RegionCoprocessorEnvironment> c,
final Append append); @Override Result preIncrement(final ObserverContext<RegionCoprocessorEnvironment> c,
final Increment increment); @Override Result preIncrementAfterRowLock(final ObserverContext<RegionCoprocessorEnvironment> c,
final Increment increment); @Override Cell postMutationBeforeWAL(ObserverContext<RegionCoprocessorEnvironment> ctx,
MutationType opType, Mutation mutation, Cell oldCell, Cell newCell); @Override RegionScanner preScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
final Scan scan, final RegionScanner s); @Override RegionScanner postScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
final Scan scan, final RegionScanner s); @Override boolean preScannerNext(final ObserverContext<RegionCoprocessorEnvironment> c,
final InternalScanner s, final List<Result> result,
final int limit, final boolean hasNext); @Override void preScannerClose(final ObserverContext<RegionCoprocessorEnvironment> c,
final InternalScanner s); @Override void postScannerClose(final ObserverContext<RegionCoprocessorEnvironment> c,
final InternalScanner s); @Override void preBulkLoadHFile(ObserverContext<RegionCoprocessorEnvironment> ctx,
List<Pair<byte[], String>> familyPaths); @Override void prePrepareBulkLoad(ObserverContext<RegionCoprocessorEnvironment> ctx,
PrepareBulkLoadRequest request); @Override void preCleanupBulkLoad(ObserverContext<RegionCoprocessorEnvironment> ctx,
CleanupBulkLoadRequest request); @Override Message preEndpointInvocation(ObserverContext<RegionCoprocessorEnvironment> ctx,
Service service, String methodName, Message request); @Override void postEndpointInvocation(ObserverContext<RegionCoprocessorEnvironment> ctx,
Service service, String methodName, Message request, Message.Builder responseBuilder); @Override void grant(RpcController controller,
AccessControlProtos.GrantRequest request,
RpcCallback<AccessControlProtos.GrantResponse> done); @Override void revoke(RpcController controller,
AccessControlProtos.RevokeRequest request,
RpcCallback<AccessControlProtos.RevokeResponse> done); @Override void getUserPermissions(RpcController controller,
AccessControlProtos.GetUserPermissionsRequest request,
RpcCallback<AccessControlProtos.GetUserPermissionsResponse> done); @Override void checkPermissions(RpcController controller,
AccessControlProtos.CheckPermissionsRequest request,
RpcCallback<AccessControlProtos.CheckPermissionsResponse> done); @Override Service getService(); @Override void preClose(ObserverContext<RegionCoprocessorEnvironment> e, boolean abortRequested); @Override void preStopRegionServer(
ObserverContext<RegionServerCoprocessorEnvironment> env); @Override void preGetTableDescriptors(ObserverContext<MasterCoprocessorEnvironment> ctx,
List<TableName> tableNamesList, List<HTableDescriptor> descriptors,
String regex); @Override void postGetTableDescriptors(ObserverContext<MasterCoprocessorEnvironment> ctx,
List<TableName> tableNamesList, List<HTableDescriptor> descriptors,
String regex); @Override void postGetTableNames(ObserverContext<MasterCoprocessorEnvironment> ctx,
List<HTableDescriptor> descriptors, String regex); @Override void preMerge(ObserverContext<RegionServerCoprocessorEnvironment> ctx, Region regionA,
Region regionB); @Override void postMerge(ObserverContext<RegionServerCoprocessorEnvironment> c, Region regionA,
Region regionB, Region mergedRegion); @Override void preMergeCommit(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
Region regionA, Region regionB, List<Mutation> metaEntries); @Override void postMergeCommit(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
Region regionA, Region regionB, Region mergedRegion); @Override void preRollBackMerge(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
Region regionA, Region regionB); @Override void postRollBackMerge(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
Region regionA, Region regionB); @Override void preRollWALWriterRequest(ObserverContext<RegionServerCoprocessorEnvironment> ctx); @Override void postRollWALWriterRequest(ObserverContext<RegionServerCoprocessorEnvironment> ctx); @Override ReplicationEndpoint postCreateReplicationEndPoint(
ObserverContext<RegionServerCoprocessorEnvironment> ctx, ReplicationEndpoint endpoint); @Override void preReplicateLogEntries(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
List<WALEntry> entries, CellScanner cells); @Override void postReplicateLogEntries(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
List<WALEntry> entries, CellScanner cells); @Override void preSetUserQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final String userName, final Quotas quotas); @Override void preSetUserQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final String userName, final TableName tableName, final Quotas quotas); @Override void preSetUserQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final String userName, final String namespace, final Quotas quotas); @Override void preSetTableQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final TableName tableName, final Quotas quotas); @Override void preSetNamespaceQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final String namespace, final Quotas quotas); } | @Test (timeout=180000) public void testBalanceSwitch() throws Exception { AccessTestAction action = new AccessTestAction() { @Override public Object run() throws Exception { ACCESS_CONTROLLER.preBalanceSwitch(ObserverContext.createAndPrepare(CP_ENV, null), true); return null; } }; verifyAllowed(action, SUPERUSER, USER_ADMIN, USER_GROUP_ADMIN); verifyDenied(action, USER_CREATE, USER_OWNER, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ, USER_GROUP_WRITE, USER_GROUP_CREATE); } |
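The verifyAllowed/verifyDenied helpers used in the test above come from the HBase security test base class; a hedged, simplified sketch of that allow/deny pattern follows. Caller, runAs, and the local AccessDeniedException are hypothetical stand-ins, not HBase types: the idea is only that the same action is executed as each user and an access-denied failure is asserted present (denied users) or absent (allowed users).

// Hedged sketch (simplified, not HBase's SecureTestUtil): run one action as many
// users and assert the expected allow/deny outcome per user.
import java.util.concurrent.Callable;

public class AllowDenySketch {
    static class AccessDeniedException extends RuntimeException {}

    interface Caller { <T> T runAs(Callable<T> action) throws Exception; }

    static void verifyAllowed(Callable<?> action, Caller... users) throws Exception {
        for (Caller u : users) {
            u.runAs(action); // must complete without AccessDeniedException
        }
    }

    static void verifyDenied(Callable<?> action, Caller... users) throws Exception {
        for (Caller u : users) {
            try {
                u.runAs(action);
                throw new AssertionError("expected an access-denied failure for " + u);
            } catch (AccessDeniedException expected) {
                // the permission guard rejected this caller, as the test requires
            }
        }
    }
}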
AccessController extends BaseMasterAndRegionObserver implements RegionServerObserver,
AccessControlService.Interface, CoprocessorService, EndpointObserver, BulkLoadObserver { @Override public void preShutdown(ObserverContext<MasterCoprocessorEnvironment> c) throws IOException { requirePermission("shutdown", Action.ADMIN); } static boolean isAuthorizationSupported(Configuration conf); static boolean isCellAuthorizationSupported(Configuration conf); Region getRegion(); TableAuthManager getAuthManager(); void requireNamespacePermission(String request, String namespace,
Action... permissions); void requireNamespacePermission(String request, String namespace, TableName tableName,
Map<byte[], ? extends Collection<byte[]>> familyMap, Action... permissions); @Override void start(CoprocessorEnvironment env); @Override void stop(CoprocessorEnvironment env); @Override void preCreateTable(ObserverContext<MasterCoprocessorEnvironment> c,
HTableDescriptor desc, HRegionInfo[] regions); @Override void postCreateTableHandler(final ObserverContext<MasterCoprocessorEnvironment> c,
HTableDescriptor desc, HRegionInfo[] regions); @Override void preDeleteTable(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName); @Override void postDeleteTable(ObserverContext<MasterCoprocessorEnvironment> c,
final TableName tableName); @Override void preTruncateTable(ObserverContext<MasterCoprocessorEnvironment> c,
final TableName tableName); @Override void postTruncateTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
final TableName tableName); @Override void preModifyTable(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName,
HTableDescriptor htd); @Override void postModifyTable(ObserverContext<MasterCoprocessorEnvironment> c,
TableName tableName, final HTableDescriptor htd); @Override void preAddColumn(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName,
HColumnDescriptor column); @Override void preModifyColumn(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName,
HColumnDescriptor descriptor); @Override void preDeleteColumn(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName,
byte[] col); @Override void postDeleteColumn(ObserverContext<MasterCoprocessorEnvironment> c,
final TableName tableName, final byte[] col); @Override void preEnableTable(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName); @Override void preDisableTable(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName); @Override void preAbortProcedure(
ObserverContext<MasterCoprocessorEnvironment> ctx,
final ProcedureExecutor<MasterProcedureEnv> procEnv,
final long procId); @Override void postAbortProcedure(ObserverContext<MasterCoprocessorEnvironment> ctx); @Override void preListProcedures(ObserverContext<MasterCoprocessorEnvironment> ctx); @Override void postListProcedures(
ObserverContext<MasterCoprocessorEnvironment> ctx,
List<ProcedureInfo> procInfoList); @Override void preMove(ObserverContext<MasterCoprocessorEnvironment> c, HRegionInfo region,
ServerName srcServer, ServerName destServer); @Override void preAssign(ObserverContext<MasterCoprocessorEnvironment> c, HRegionInfo regionInfo); @Override void preUnassign(ObserverContext<MasterCoprocessorEnvironment> c, HRegionInfo regionInfo,
boolean force); @Override void preRegionOffline(ObserverContext<MasterCoprocessorEnvironment> c,
HRegionInfo regionInfo); @Override void preBalance(ObserverContext<MasterCoprocessorEnvironment> c); @Override boolean preBalanceSwitch(ObserverContext<MasterCoprocessorEnvironment> c,
boolean newValue); @Override void preShutdown(ObserverContext<MasterCoprocessorEnvironment> c); @Override void preStopMaster(ObserverContext<MasterCoprocessorEnvironment> c); @Override void postStartMaster(ObserverContext<MasterCoprocessorEnvironment> ctx); @Override void preSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final SnapshotDescription snapshot, final HTableDescriptor hTableDescriptor); @Override void preListSnapshot(ObserverContext<MasterCoprocessorEnvironment> ctx,
final SnapshotDescription snapshot); @Override void preCloneSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final SnapshotDescription snapshot, final HTableDescriptor hTableDescriptor); @Override void preRestoreSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final SnapshotDescription snapshot, final HTableDescriptor hTableDescriptor); @Override void preDeleteSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final SnapshotDescription snapshot); @Override void preCreateNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx,
NamespaceDescriptor ns); @Override void preDeleteNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx, String namespace); @Override void postDeleteNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx,
final String namespace); @Override void preModifyNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx,
NamespaceDescriptor ns); @Override void preGetNamespaceDescriptor(ObserverContext<MasterCoprocessorEnvironment> ctx, String namespace); @Override void postListNamespaceDescriptors(ObserverContext<MasterCoprocessorEnvironment> ctx,
List<NamespaceDescriptor> descriptors); @Override void preTableFlush(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final TableName tableName); @Override void preOpen(ObserverContext<RegionCoprocessorEnvironment> e); @Override void postOpen(ObserverContext<RegionCoprocessorEnvironment> c); @Override void postLogReplay(ObserverContext<RegionCoprocessorEnvironment> c); @Override void preFlush(ObserverContext<RegionCoprocessorEnvironment> e); @Override void preSplit(ObserverContext<RegionCoprocessorEnvironment> e); @Override void preSplit(ObserverContext<RegionCoprocessorEnvironment> e,
byte[] splitRow); @Override InternalScanner preCompact(ObserverContext<RegionCoprocessorEnvironment> e,
final Store store, final InternalScanner scanner, final ScanType scanType); @Override void preGetClosestRowBefore(final ObserverContext<RegionCoprocessorEnvironment> c,
final byte [] row, final byte [] family, final Result result); @Override void preGetOp(final ObserverContext<RegionCoprocessorEnvironment> c,
final Get get, final List<Cell> result); @Override boolean preExists(final ObserverContext<RegionCoprocessorEnvironment> c,
final Get get, final boolean exists); @Override void prePut(final ObserverContext<RegionCoprocessorEnvironment> c,
final Put put, final WALEdit edit, final Durability durability); @Override void postPut(final ObserverContext<RegionCoprocessorEnvironment> c,
final Put put, final WALEdit edit, final Durability durability); @Override void preDelete(final ObserverContext<RegionCoprocessorEnvironment> c,
final Delete delete, final WALEdit edit, final Durability durability); @Override void preBatchMutate(ObserverContext<RegionCoprocessorEnvironment> c,
MiniBatchOperationInProgress<Mutation> miniBatchOp); @Override void postDelete(final ObserverContext<RegionCoprocessorEnvironment> c,
final Delete delete, final WALEdit edit, final Durability durability); @Override boolean preCheckAndPut(final ObserverContext<RegionCoprocessorEnvironment> c,
final byte [] row, final byte [] family, final byte [] qualifier,
final CompareFilter.CompareOp compareOp,
final ByteArrayComparable comparator, final Put put,
final boolean result); @Override boolean preCheckAndPutAfterRowLock(final ObserverContext<RegionCoprocessorEnvironment> c,
final byte[] row, final byte[] family, final byte[] qualifier,
final CompareFilter.CompareOp compareOp, final ByteArrayComparable comparator, final Put put,
final boolean result); @Override boolean preCheckAndDelete(final ObserverContext<RegionCoprocessorEnvironment> c,
final byte [] row, final byte [] family, final byte [] qualifier,
final CompareFilter.CompareOp compareOp,
final ByteArrayComparable comparator, final Delete delete,
final boolean result); @Override boolean preCheckAndDeleteAfterRowLock(
final ObserverContext<RegionCoprocessorEnvironment> c, final byte[] row, final byte[] family,
final byte[] qualifier, final CompareFilter.CompareOp compareOp,
final ByteArrayComparable comparator, final Delete delete, final boolean result); @Override long preIncrementColumnValue(final ObserverContext<RegionCoprocessorEnvironment> c,
final byte [] row, final byte [] family, final byte [] qualifier,
final long amount, final boolean writeToWAL); @Override Result preAppend(ObserverContext<RegionCoprocessorEnvironment> c, Append append); @Override Result preAppendAfterRowLock(final ObserverContext<RegionCoprocessorEnvironment> c,
final Append append); @Override Result preIncrement(final ObserverContext<RegionCoprocessorEnvironment> c,
final Increment increment); @Override Result preIncrementAfterRowLock(final ObserverContext<RegionCoprocessorEnvironment> c,
final Increment increment); @Override Cell postMutationBeforeWAL(ObserverContext<RegionCoprocessorEnvironment> ctx,
MutationType opType, Mutation mutation, Cell oldCell, Cell newCell); @Override RegionScanner preScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
final Scan scan, final RegionScanner s); @Override RegionScanner postScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
final Scan scan, final RegionScanner s); @Override boolean preScannerNext(final ObserverContext<RegionCoprocessorEnvironment> c,
final InternalScanner s, final List<Result> result,
final int limit, final boolean hasNext); @Override void preScannerClose(final ObserverContext<RegionCoprocessorEnvironment> c,
final InternalScanner s); @Override void postScannerClose(final ObserverContext<RegionCoprocessorEnvironment> c,
final InternalScanner s); @Override void preBulkLoadHFile(ObserverContext<RegionCoprocessorEnvironment> ctx,
List<Pair<byte[], String>> familyPaths); @Override void prePrepareBulkLoad(ObserverContext<RegionCoprocessorEnvironment> ctx,
PrepareBulkLoadRequest request); @Override void preCleanupBulkLoad(ObserverContext<RegionCoprocessorEnvironment> ctx,
CleanupBulkLoadRequest request); @Override Message preEndpointInvocation(ObserverContext<RegionCoprocessorEnvironment> ctx,
Service service, String methodName, Message request); @Override void postEndpointInvocation(ObserverContext<RegionCoprocessorEnvironment> ctx,
Service service, String methodName, Message request, Message.Builder responseBuilder); @Override void grant(RpcController controller,
AccessControlProtos.GrantRequest request,
RpcCallback<AccessControlProtos.GrantResponse> done); @Override void revoke(RpcController controller,
AccessControlProtos.RevokeRequest request,
RpcCallback<AccessControlProtos.RevokeResponse> done); @Override void getUserPermissions(RpcController controller,
AccessControlProtos.GetUserPermissionsRequest request,
RpcCallback<AccessControlProtos.GetUserPermissionsResponse> done); @Override void checkPermissions(RpcController controller,
AccessControlProtos.CheckPermissionsRequest request,
RpcCallback<AccessControlProtos.CheckPermissionsResponse> done); @Override Service getService(); @Override void preClose(ObserverContext<RegionCoprocessorEnvironment> e, boolean abortRequested); @Override void preStopRegionServer(
ObserverContext<RegionServerCoprocessorEnvironment> env); @Override void preGetTableDescriptors(ObserverContext<MasterCoprocessorEnvironment> ctx,
List<TableName> tableNamesList, List<HTableDescriptor> descriptors,
String regex); @Override void postGetTableDescriptors(ObserverContext<MasterCoprocessorEnvironment> ctx,
List<TableName> tableNamesList, List<HTableDescriptor> descriptors,
String regex); @Override void postGetTableNames(ObserverContext<MasterCoprocessorEnvironment> ctx,
List<HTableDescriptor> descriptors, String regex); @Override void preMerge(ObserverContext<RegionServerCoprocessorEnvironment> ctx, Region regionA,
Region regionB); @Override void postMerge(ObserverContext<RegionServerCoprocessorEnvironment> c, Region regionA,
Region regionB, Region mergedRegion); @Override void preMergeCommit(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
Region regionA, Region regionB, List<Mutation> metaEntries); @Override void postMergeCommit(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
Region regionA, Region regionB, Region mergedRegion); @Override void preRollBackMerge(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
Region regionA, Region regionB); @Override void postRollBackMerge(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
Region regionA, Region regionB); @Override void preRollWALWriterRequest(ObserverContext<RegionServerCoprocessorEnvironment> ctx); @Override void postRollWALWriterRequest(ObserverContext<RegionServerCoprocessorEnvironment> ctx); @Override ReplicationEndpoint postCreateReplicationEndPoint(
ObserverContext<RegionServerCoprocessorEnvironment> ctx, ReplicationEndpoint endpoint); @Override void preReplicateLogEntries(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
List<WALEntry> entries, CellScanner cells); @Override void postReplicateLogEntries(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
List<WALEntry> entries, CellScanner cells); @Override void preSetUserQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final String userName, final Quotas quotas); @Override void preSetUserQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final String userName, final TableName tableName, final Quotas quotas); @Override void preSetUserQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final String userName, final String namespace, final Quotas quotas); @Override void preSetTableQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final TableName tableName, final Quotas quotas); @Override void preSetNamespaceQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final String namespace, final Quotas quotas); } | @Test (timeout=180000) public void testShutdown() throws Exception { AccessTestAction action = new AccessTestAction() { @Override public Object run() throws Exception { ACCESS_CONTROLLER.preShutdown(ObserverContext.createAndPrepare(CP_ENV, null)); return null; } }; verifyAllowed(action, SUPERUSER, USER_ADMIN, USER_GROUP_ADMIN); verifyDenied(action, USER_CREATE, USER_OWNER, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ, USER_GROUP_WRITE, USER_GROUP_CREATE); } |
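The guards behind preShutdown, preStopMaster, and preBalanceSwitch all reduce to a global ADMIN check, and the table-scoped operations later in this section (preSplit, preMerge) apply the same pattern against a per-table grant. Below is a hedged, hypothetical simplification of that check; the Action enum, grant map, and message format are illustrative assumptions, not HBase's TableAuthManager.

// Hedged sketch (hypothetical simplification): fail fast unless the calling user
// holds the required action globally.
import java.io.IOException;
import java.util.Map;
import java.util.Set;

public class AdminGuardSketch {
    enum Action { READ, WRITE, CREATE, ADMIN }

    static class AccessDeniedException extends IOException {
        AccessDeniedException(String msg) { super(msg); }
    }

    private final Map<String, Set<Action>> globalGrants; // user -> globally granted actions

    AdminGuardSketch(Map<String, Set<Action>> globalGrants) {
        this.globalGrants = globalGrants;
    }

    // Analogue of requirePermission("shutdown", Action.ADMIN).
    void requirePermission(String user, String request, Action required) throws AccessDeniedException {
        Set<Action> granted = globalGrants.get(user);
        if (granted == null || !granted.contains(required)) {
            throw new AccessDeniedException("Insufficient permissions for user '" + user
                + "' (op=" + request + ", action=" + required + ")");
        }
    }
}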
AccessController extends BaseMasterAndRegionObserver implements RegionServerObserver,
AccessControlService.Interface, CoprocessorService, EndpointObserver, BulkLoadObserver { @Override public void preStopMaster(ObserverContext<MasterCoprocessorEnvironment> c) throws IOException { requirePermission("stopMaster", Action.ADMIN); } static boolean isAuthorizationSupported(Configuration conf); static boolean isCellAuthorizationSupported(Configuration conf); Region getRegion(); TableAuthManager getAuthManager(); void requireNamespacePermission(String request, String namespace,
Action... permissions); void requireNamespacePermission(String request, String namespace, TableName tableName,
Map<byte[], ? extends Collection<byte[]>> familyMap, Action... permissions); @Override void start(CoprocessorEnvironment env); @Override void stop(CoprocessorEnvironment env); @Override void preCreateTable(ObserverContext<MasterCoprocessorEnvironment> c,
HTableDescriptor desc, HRegionInfo[] regions); @Override void postCreateTableHandler(final ObserverContext<MasterCoprocessorEnvironment> c,
HTableDescriptor desc, HRegionInfo[] regions); @Override void preDeleteTable(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName); @Override void postDeleteTable(ObserverContext<MasterCoprocessorEnvironment> c,
final TableName tableName); @Override void preTruncateTable(ObserverContext<MasterCoprocessorEnvironment> c,
final TableName tableName); @Override void postTruncateTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
final TableName tableName); @Override void preModifyTable(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName,
HTableDescriptor htd); @Override void postModifyTable(ObserverContext<MasterCoprocessorEnvironment> c,
TableName tableName, final HTableDescriptor htd); @Override void preAddColumn(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName,
HColumnDescriptor column); @Override void preModifyColumn(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName,
HColumnDescriptor descriptor); @Override void preDeleteColumn(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName,
byte[] col); @Override void postDeleteColumn(ObserverContext<MasterCoprocessorEnvironment> c,
final TableName tableName, final byte[] col); @Override void preEnableTable(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName); @Override void preDisableTable(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName); @Override void preAbortProcedure(
ObserverContext<MasterCoprocessorEnvironment> ctx,
final ProcedureExecutor<MasterProcedureEnv> procEnv,
final long procId); @Override void postAbortProcedure(ObserverContext<MasterCoprocessorEnvironment> ctx); @Override void preListProcedures(ObserverContext<MasterCoprocessorEnvironment> ctx); @Override void postListProcedures(
ObserverContext<MasterCoprocessorEnvironment> ctx,
List<ProcedureInfo> procInfoList); @Override void preMove(ObserverContext<MasterCoprocessorEnvironment> c, HRegionInfo region,
ServerName srcServer, ServerName destServer); @Override void preAssign(ObserverContext<MasterCoprocessorEnvironment> c, HRegionInfo regionInfo); @Override void preUnassign(ObserverContext<MasterCoprocessorEnvironment> c, HRegionInfo regionInfo,
boolean force); @Override void preRegionOffline(ObserverContext<MasterCoprocessorEnvironment> c,
HRegionInfo regionInfo); @Override void preBalance(ObserverContext<MasterCoprocessorEnvironment> c); @Override boolean preBalanceSwitch(ObserverContext<MasterCoprocessorEnvironment> c,
boolean newValue); @Override void preShutdown(ObserverContext<MasterCoprocessorEnvironment> c); @Override void preStopMaster(ObserverContext<MasterCoprocessorEnvironment> c); @Override void postStartMaster(ObserverContext<MasterCoprocessorEnvironment> ctx); @Override void preSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final SnapshotDescription snapshot, final HTableDescriptor hTableDescriptor); @Override void preListSnapshot(ObserverContext<MasterCoprocessorEnvironment> ctx,
final SnapshotDescription snapshot); @Override void preCloneSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final SnapshotDescription snapshot, final HTableDescriptor hTableDescriptor); @Override void preRestoreSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final SnapshotDescription snapshot, final HTableDescriptor hTableDescriptor); @Override void preDeleteSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final SnapshotDescription snapshot); @Override void preCreateNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx,
NamespaceDescriptor ns); @Override void preDeleteNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx, String namespace); @Override void postDeleteNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx,
final String namespace); @Override void preModifyNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx,
NamespaceDescriptor ns); @Override void preGetNamespaceDescriptor(ObserverContext<MasterCoprocessorEnvironment> ctx, String namespace); @Override void postListNamespaceDescriptors(ObserverContext<MasterCoprocessorEnvironment> ctx,
List<NamespaceDescriptor> descriptors); @Override void preTableFlush(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final TableName tableName); @Override void preOpen(ObserverContext<RegionCoprocessorEnvironment> e); @Override void postOpen(ObserverContext<RegionCoprocessorEnvironment> c); @Override void postLogReplay(ObserverContext<RegionCoprocessorEnvironment> c); @Override void preFlush(ObserverContext<RegionCoprocessorEnvironment> e); @Override void preSplit(ObserverContext<RegionCoprocessorEnvironment> e); @Override void preSplit(ObserverContext<RegionCoprocessorEnvironment> e,
byte[] splitRow); @Override InternalScanner preCompact(ObserverContext<RegionCoprocessorEnvironment> e,
final Store store, final InternalScanner scanner, final ScanType scanType); @Override void preGetClosestRowBefore(final ObserverContext<RegionCoprocessorEnvironment> c,
final byte [] row, final byte [] family, final Result result); @Override void preGetOp(final ObserverContext<RegionCoprocessorEnvironment> c,
final Get get, final List<Cell> result); @Override boolean preExists(final ObserverContext<RegionCoprocessorEnvironment> c,
final Get get, final boolean exists); @Override void prePut(final ObserverContext<RegionCoprocessorEnvironment> c,
final Put put, final WALEdit edit, final Durability durability); @Override void postPut(final ObserverContext<RegionCoprocessorEnvironment> c,
final Put put, final WALEdit edit, final Durability durability); @Override void preDelete(final ObserverContext<RegionCoprocessorEnvironment> c,
final Delete delete, final WALEdit edit, final Durability durability); @Override void preBatchMutate(ObserverContext<RegionCoprocessorEnvironment> c,
MiniBatchOperationInProgress<Mutation> miniBatchOp); @Override void postDelete(final ObserverContext<RegionCoprocessorEnvironment> c,
final Delete delete, final WALEdit edit, final Durability durability); @Override boolean preCheckAndPut(final ObserverContext<RegionCoprocessorEnvironment> c,
final byte [] row, final byte [] family, final byte [] qualifier,
final CompareFilter.CompareOp compareOp,
final ByteArrayComparable comparator, final Put put,
final boolean result); @Override boolean preCheckAndPutAfterRowLock(final ObserverContext<RegionCoprocessorEnvironment> c,
final byte[] row, final byte[] family, final byte[] qualifier,
final CompareFilter.CompareOp compareOp, final ByteArrayComparable comparator, final Put put,
final boolean result); @Override boolean preCheckAndDelete(final ObserverContext<RegionCoprocessorEnvironment> c,
final byte [] row, final byte [] family, final byte [] qualifier,
final CompareFilter.CompareOp compareOp,
final ByteArrayComparable comparator, final Delete delete,
final boolean result); @Override boolean preCheckAndDeleteAfterRowLock(
final ObserverContext<RegionCoprocessorEnvironment> c, final byte[] row, final byte[] family,
final byte[] qualifier, final CompareFilter.CompareOp compareOp,
final ByteArrayComparable comparator, final Delete delete, final boolean result); @Override long preIncrementColumnValue(final ObserverContext<RegionCoprocessorEnvironment> c,
final byte [] row, final byte [] family, final byte [] qualifier,
final long amount, final boolean writeToWAL); @Override Result preAppend(ObserverContext<RegionCoprocessorEnvironment> c, Append append); @Override Result preAppendAfterRowLock(final ObserverContext<RegionCoprocessorEnvironment> c,
final Append append); @Override Result preIncrement(final ObserverContext<RegionCoprocessorEnvironment> c,
final Increment increment); @Override Result preIncrementAfterRowLock(final ObserverContext<RegionCoprocessorEnvironment> c,
final Increment increment); @Override Cell postMutationBeforeWAL(ObserverContext<RegionCoprocessorEnvironment> ctx,
MutationType opType, Mutation mutation, Cell oldCell, Cell newCell); @Override RegionScanner preScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
final Scan scan, final RegionScanner s); @Override RegionScanner postScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
final Scan scan, final RegionScanner s); @Override boolean preScannerNext(final ObserverContext<RegionCoprocessorEnvironment> c,
final InternalScanner s, final List<Result> result,
final int limit, final boolean hasNext); @Override void preScannerClose(final ObserverContext<RegionCoprocessorEnvironment> c,
final InternalScanner s); @Override void postScannerClose(final ObserverContext<RegionCoprocessorEnvironment> c,
final InternalScanner s); @Override void preBulkLoadHFile(ObserverContext<RegionCoprocessorEnvironment> ctx,
List<Pair<byte[], String>> familyPaths); @Override void prePrepareBulkLoad(ObserverContext<RegionCoprocessorEnvironment> ctx,
PrepareBulkLoadRequest request); @Override void preCleanupBulkLoad(ObserverContext<RegionCoprocessorEnvironment> ctx,
CleanupBulkLoadRequest request); @Override Message preEndpointInvocation(ObserverContext<RegionCoprocessorEnvironment> ctx,
Service service, String methodName, Message request); @Override void postEndpointInvocation(ObserverContext<RegionCoprocessorEnvironment> ctx,
Service service, String methodName, Message request, Message.Builder responseBuilder); @Override void grant(RpcController controller,
AccessControlProtos.GrantRequest request,
RpcCallback<AccessControlProtos.GrantResponse> done); @Override void revoke(RpcController controller,
AccessControlProtos.RevokeRequest request,
RpcCallback<AccessControlProtos.RevokeResponse> done); @Override void getUserPermissions(RpcController controller,
AccessControlProtos.GetUserPermissionsRequest request,
RpcCallback<AccessControlProtos.GetUserPermissionsResponse> done); @Override void checkPermissions(RpcController controller,
AccessControlProtos.CheckPermissionsRequest request,
RpcCallback<AccessControlProtos.CheckPermissionsResponse> done); @Override Service getService(); @Override void preClose(ObserverContext<RegionCoprocessorEnvironment> e, boolean abortRequested); @Override void preStopRegionServer(
ObserverContext<RegionServerCoprocessorEnvironment> env); @Override void preGetTableDescriptors(ObserverContext<MasterCoprocessorEnvironment> ctx,
List<TableName> tableNamesList, List<HTableDescriptor> descriptors,
String regex); @Override void postGetTableDescriptors(ObserverContext<MasterCoprocessorEnvironment> ctx,
List<TableName> tableNamesList, List<HTableDescriptor> descriptors,
String regex); @Override void postGetTableNames(ObserverContext<MasterCoprocessorEnvironment> ctx,
List<HTableDescriptor> descriptors, String regex); @Override void preMerge(ObserverContext<RegionServerCoprocessorEnvironment> ctx, Region regionA,
Region regionB); @Override void postMerge(ObserverContext<RegionServerCoprocessorEnvironment> c, Region regionA,
Region regionB, Region mergedRegion); @Override void preMergeCommit(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
Region regionA, Region regionB, List<Mutation> metaEntries); @Override void postMergeCommit(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
Region regionA, Region regionB, Region mergedRegion); @Override void preRollBackMerge(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
Region regionA, Region regionB); @Override void postRollBackMerge(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
Region regionA, Region regionB); @Override void preRollWALWriterRequest(ObserverContext<RegionServerCoprocessorEnvironment> ctx); @Override void postRollWALWriterRequest(ObserverContext<RegionServerCoprocessorEnvironment> ctx); @Override ReplicationEndpoint postCreateReplicationEndPoint(
ObserverContext<RegionServerCoprocessorEnvironment> ctx, ReplicationEndpoint endpoint); @Override void preReplicateLogEntries(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
List<WALEntry> entries, CellScanner cells); @Override void postReplicateLogEntries(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
List<WALEntry> entries, CellScanner cells); @Override void preSetUserQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final String userName, final Quotas quotas); @Override void preSetUserQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final String userName, final TableName tableName, final Quotas quotas); @Override void preSetUserQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final String userName, final String namespace, final Quotas quotas); @Override void preSetTableQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final TableName tableName, final Quotas quotas); @Override void preSetNamespaceQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final String namespace, final Quotas quotas); } | @Test (timeout=180000) public void testStopMaster() throws Exception { AccessTestAction action = new AccessTestAction() { @Override public Object run() throws Exception { ACCESS_CONTROLLER.preStopMaster(ObserverContext.createAndPrepare(CP_ENV, null)); return null; } }; verifyAllowed(action, SUPERUSER, USER_ADMIN, USER_GROUP_ADMIN); verifyDenied(action, USER_CREATE, USER_OWNER, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ, USER_GROUP_WRITE, USER_GROUP_CREATE); } |
AccessController extends BaseMasterAndRegionObserver implements RegionServerObserver,
AccessControlService.Interface, CoprocessorService, EndpointObserver, BulkLoadObserver { @Override public void preSplit(ObserverContext<RegionCoprocessorEnvironment> e) throws IOException { requirePermission("split", getTableName(e.getEnvironment()), null, null, Action.ADMIN); } static boolean isAuthorizationSupported(Configuration conf); static boolean isCellAuthorizationSupported(Configuration conf); Region getRegion(); TableAuthManager getAuthManager(); void requireNamespacePermission(String request, String namespace,
Action... permissions); void requireNamespacePermission(String request, String namespace, TableName tableName,
Map<byte[], ? extends Collection<byte[]>> familyMap, Action... permissions); @Override void start(CoprocessorEnvironment env); @Override void stop(CoprocessorEnvironment env); @Override void preCreateTable(ObserverContext<MasterCoprocessorEnvironment> c,
HTableDescriptor desc, HRegionInfo[] regions); @Override void postCreateTableHandler(final ObserverContext<MasterCoprocessorEnvironment> c,
HTableDescriptor desc, HRegionInfo[] regions); @Override void preDeleteTable(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName); @Override void postDeleteTable(ObserverContext<MasterCoprocessorEnvironment> c,
final TableName tableName); @Override void preTruncateTable(ObserverContext<MasterCoprocessorEnvironment> c,
final TableName tableName); @Override void postTruncateTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
final TableName tableName); @Override void preModifyTable(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName,
HTableDescriptor htd); @Override void postModifyTable(ObserverContext<MasterCoprocessorEnvironment> c,
TableName tableName, final HTableDescriptor htd); @Override void preAddColumn(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName,
HColumnDescriptor column); @Override void preModifyColumn(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName,
HColumnDescriptor descriptor); @Override void preDeleteColumn(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName,
byte[] col); @Override void postDeleteColumn(ObserverContext<MasterCoprocessorEnvironment> c,
final TableName tableName, final byte[] col); @Override void preEnableTable(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName); @Override void preDisableTable(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName); @Override void preAbortProcedure(
ObserverContext<MasterCoprocessorEnvironment> ctx,
final ProcedureExecutor<MasterProcedureEnv> procEnv,
final long procId); @Override void postAbortProcedure(ObserverContext<MasterCoprocessorEnvironment> ctx); @Override void preListProcedures(ObserverContext<MasterCoprocessorEnvironment> ctx); @Override void postListProcedures(
ObserverContext<MasterCoprocessorEnvironment> ctx,
List<ProcedureInfo> procInfoList); @Override void preMove(ObserverContext<MasterCoprocessorEnvironment> c, HRegionInfo region,
ServerName srcServer, ServerName destServer); @Override void preAssign(ObserverContext<MasterCoprocessorEnvironment> c, HRegionInfo regionInfo); @Override void preUnassign(ObserverContext<MasterCoprocessorEnvironment> c, HRegionInfo regionInfo,
boolean force); @Override void preRegionOffline(ObserverContext<MasterCoprocessorEnvironment> c,
HRegionInfo regionInfo); @Override void preBalance(ObserverContext<MasterCoprocessorEnvironment> c); @Override boolean preBalanceSwitch(ObserverContext<MasterCoprocessorEnvironment> c,
boolean newValue); @Override void preShutdown(ObserverContext<MasterCoprocessorEnvironment> c); @Override void preStopMaster(ObserverContext<MasterCoprocessorEnvironment> c); @Override void postStartMaster(ObserverContext<MasterCoprocessorEnvironment> ctx); @Override void preSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final SnapshotDescription snapshot, final HTableDescriptor hTableDescriptor); @Override void preListSnapshot(ObserverContext<MasterCoprocessorEnvironment> ctx,
final SnapshotDescription snapshot); @Override void preCloneSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final SnapshotDescription snapshot, final HTableDescriptor hTableDescriptor); @Override void preRestoreSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final SnapshotDescription snapshot, final HTableDescriptor hTableDescriptor); @Override void preDeleteSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final SnapshotDescription snapshot); @Override void preCreateNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx,
NamespaceDescriptor ns); @Override void preDeleteNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx, String namespace); @Override void postDeleteNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx,
final String namespace); @Override void preModifyNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx,
NamespaceDescriptor ns); @Override void preGetNamespaceDescriptor(ObserverContext<MasterCoprocessorEnvironment> ctx, String namespace); @Override void postListNamespaceDescriptors(ObserverContext<MasterCoprocessorEnvironment> ctx,
List<NamespaceDescriptor> descriptors); @Override void preTableFlush(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final TableName tableName); @Override void preOpen(ObserverContext<RegionCoprocessorEnvironment> e); @Override void postOpen(ObserverContext<RegionCoprocessorEnvironment> c); @Override void postLogReplay(ObserverContext<RegionCoprocessorEnvironment> c); @Override void preFlush(ObserverContext<RegionCoprocessorEnvironment> e); @Override void preSplit(ObserverContext<RegionCoprocessorEnvironment> e); @Override void preSplit(ObserverContext<RegionCoprocessorEnvironment> e,
byte[] splitRow); @Override InternalScanner preCompact(ObserverContext<RegionCoprocessorEnvironment> e,
final Store store, final InternalScanner scanner, final ScanType scanType); @Override void preGetClosestRowBefore(final ObserverContext<RegionCoprocessorEnvironment> c,
final byte [] row, final byte [] family, final Result result); @Override void preGetOp(final ObserverContext<RegionCoprocessorEnvironment> c,
final Get get, final List<Cell> result); @Override boolean preExists(final ObserverContext<RegionCoprocessorEnvironment> c,
final Get get, final boolean exists); @Override void prePut(final ObserverContext<RegionCoprocessorEnvironment> c,
final Put put, final WALEdit edit, final Durability durability); @Override void postPut(final ObserverContext<RegionCoprocessorEnvironment> c,
final Put put, final WALEdit edit, final Durability durability); @Override void preDelete(final ObserverContext<RegionCoprocessorEnvironment> c,
final Delete delete, final WALEdit edit, final Durability durability); @Override void preBatchMutate(ObserverContext<RegionCoprocessorEnvironment> c,
MiniBatchOperationInProgress<Mutation> miniBatchOp); @Override void postDelete(final ObserverContext<RegionCoprocessorEnvironment> c,
final Delete delete, final WALEdit edit, final Durability durability); @Override boolean preCheckAndPut(final ObserverContext<RegionCoprocessorEnvironment> c,
final byte [] row, final byte [] family, final byte [] qualifier,
final CompareFilter.CompareOp compareOp,
final ByteArrayComparable comparator, final Put put,
final boolean result); @Override boolean preCheckAndPutAfterRowLock(final ObserverContext<RegionCoprocessorEnvironment> c,
final byte[] row, final byte[] family, final byte[] qualifier,
final CompareFilter.CompareOp compareOp, final ByteArrayComparable comparator, final Put put,
final boolean result); @Override boolean preCheckAndDelete(final ObserverContext<RegionCoprocessorEnvironment> c,
final byte [] row, final byte [] family, final byte [] qualifier,
final CompareFilter.CompareOp compareOp,
final ByteArrayComparable comparator, final Delete delete,
final boolean result); @Override boolean preCheckAndDeleteAfterRowLock(
final ObserverContext<RegionCoprocessorEnvironment> c, final byte[] row, final byte[] family,
final byte[] qualifier, final CompareFilter.CompareOp compareOp,
final ByteArrayComparable comparator, final Delete delete, final boolean result); @Override long preIncrementColumnValue(final ObserverContext<RegionCoprocessorEnvironment> c,
final byte [] row, final byte [] family, final byte [] qualifier,
final long amount, final boolean writeToWAL); @Override Result preAppend(ObserverContext<RegionCoprocessorEnvironment> c, Append append); @Override Result preAppendAfterRowLock(final ObserverContext<RegionCoprocessorEnvironment> c,
final Append append); @Override Result preIncrement(final ObserverContext<RegionCoprocessorEnvironment> c,
final Increment increment); @Override Result preIncrementAfterRowLock(final ObserverContext<RegionCoprocessorEnvironment> c,
final Increment increment); @Override Cell postMutationBeforeWAL(ObserverContext<RegionCoprocessorEnvironment> ctx,
MutationType opType, Mutation mutation, Cell oldCell, Cell newCell); @Override RegionScanner preScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
final Scan scan, final RegionScanner s); @Override RegionScanner postScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
final Scan scan, final RegionScanner s); @Override boolean preScannerNext(final ObserverContext<RegionCoprocessorEnvironment> c,
final InternalScanner s, final List<Result> result,
final int limit, final boolean hasNext); @Override void preScannerClose(final ObserverContext<RegionCoprocessorEnvironment> c,
final InternalScanner s); @Override void postScannerClose(final ObserverContext<RegionCoprocessorEnvironment> c,
final InternalScanner s); @Override void preBulkLoadHFile(ObserverContext<RegionCoprocessorEnvironment> ctx,
List<Pair<byte[], String>> familyPaths); @Override void prePrepareBulkLoad(ObserverContext<RegionCoprocessorEnvironment> ctx,
PrepareBulkLoadRequest request); @Override void preCleanupBulkLoad(ObserverContext<RegionCoprocessorEnvironment> ctx,
CleanupBulkLoadRequest request); @Override Message preEndpointInvocation(ObserverContext<RegionCoprocessorEnvironment> ctx,
Service service, String methodName, Message request); @Override void postEndpointInvocation(ObserverContext<RegionCoprocessorEnvironment> ctx,
Service service, String methodName, Message request, Message.Builder responseBuilder); @Override void grant(RpcController controller,
AccessControlProtos.GrantRequest request,
RpcCallback<AccessControlProtos.GrantResponse> done); @Override void revoke(RpcController controller,
AccessControlProtos.RevokeRequest request,
RpcCallback<AccessControlProtos.RevokeResponse> done); @Override void getUserPermissions(RpcController controller,
AccessControlProtos.GetUserPermissionsRequest request,
RpcCallback<AccessControlProtos.GetUserPermissionsResponse> done); @Override void checkPermissions(RpcController controller,
AccessControlProtos.CheckPermissionsRequest request,
RpcCallback<AccessControlProtos.CheckPermissionsResponse> done); @Override Service getService(); @Override void preClose(ObserverContext<RegionCoprocessorEnvironment> e, boolean abortRequested); @Override void preStopRegionServer(
ObserverContext<RegionServerCoprocessorEnvironment> env); @Override void preGetTableDescriptors(ObserverContext<MasterCoprocessorEnvironment> ctx,
List<TableName> tableNamesList, List<HTableDescriptor> descriptors,
String regex); @Override void postGetTableDescriptors(ObserverContext<MasterCoprocessorEnvironment> ctx,
List<TableName> tableNamesList, List<HTableDescriptor> descriptors,
String regex); @Override void postGetTableNames(ObserverContext<MasterCoprocessorEnvironment> ctx,
List<HTableDescriptor> descriptors, String regex); @Override void preMerge(ObserverContext<RegionServerCoprocessorEnvironment> ctx, Region regionA,
Region regionB); @Override void postMerge(ObserverContext<RegionServerCoprocessorEnvironment> c, Region regionA,
Region regionB, Region mergedRegion); @Override void preMergeCommit(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
Region regionA, Region regionB, List<Mutation> metaEntries); @Override void postMergeCommit(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
Region regionA, Region regionB, Region mergedRegion); @Override void preRollBackMerge(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
Region regionA, Region regionB); @Override void postRollBackMerge(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
Region regionA, Region regionB); @Override void preRollWALWriterRequest(ObserverContext<RegionServerCoprocessorEnvironment> ctx); @Override void postRollWALWriterRequest(ObserverContext<RegionServerCoprocessorEnvironment> ctx); @Override ReplicationEndpoint postCreateReplicationEndPoint(
ObserverContext<RegionServerCoprocessorEnvironment> ctx, ReplicationEndpoint endpoint); @Override void preReplicateLogEntries(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
List<WALEntry> entries, CellScanner cells); @Override void postReplicateLogEntries(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
List<WALEntry> entries, CellScanner cells); @Override void preSetUserQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final String userName, final Quotas quotas); @Override void preSetUserQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final String userName, final TableName tableName, final Quotas quotas); @Override void preSetUserQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final String userName, final String namespace, final Quotas quotas); @Override void preSetTableQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final TableName tableName, final Quotas quotas); @Override void preSetNamespaceQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final String namespace, final Quotas quotas); } | @Test (timeout=180000) public void testSplit() throws Exception { AccessTestAction action = new AccessTestAction() { @Override public Object run() throws Exception { ACCESS_CONTROLLER.preSplit(ObserverContext.createAndPrepare(RCP_ENV, null)); return null; } }; verifyAllowed(action, SUPERUSER, USER_ADMIN, USER_OWNER, USER_GROUP_ADMIN); verifyDenied(action, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ, USER_GROUP_WRITE, USER_GROUP_CREATE); }
@Test (timeout=180000) public void testSplitWithSplitRow() throws Exception { AccessTestAction action = new AccessTestAction() { @Override public Object run() throws Exception { ACCESS_CONTROLLER.preSplit( ObserverContext.createAndPrepare(RCP_ENV, null), TEST_ROW); return null; } }; verifyAllowed(action, SUPERUSER, USER_ADMIN, USER_OWNER, USER_GROUP_ADMIN); verifyDenied(action, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ, USER_GROUP_WRITE, USER_GROUP_CREATE); } |
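The two split tests above call the coprocessor's preSplit hooks directly on a prepared ObserverContext and then check which principals may run them. AccessTestAction, verifyAllowed and verifyDenied are helpers defined elsewhere in the test class (HBase's secure-test utilities); the following is only a minimal sketch of how such helpers can be built on User.runAs. The class name PermissionVerificationSketch and the exact signatures are illustrative assumptions, not the real helper API.

import java.security.PrivilegedExceptionAction;
import org.apache.hadoop.hbase.security.AccessDeniedException;
import org.apache.hadoop.hbase.security.User;

final class PermissionVerificationSketch {
  // Matches the shape of the anonymous actions built inline in the tests above.
  interface AccessTestAction extends PrivilegedExceptionAction<Object> {}

  // Run the action as each user; every run must complete without an access error.
  static void verifyAllowed(AccessTestAction action, User... users) throws Exception {
    for (User user : users) {
      user.runAs(action);
    }
  }

  // Run the action as each user; every run must be rejected with AccessDeniedException.
  static void verifyDenied(AccessTestAction action, User... users) throws Exception {
    for (User user : users) {
      boolean denied = false;
      try {
        user.runAs(action);
      } catch (Exception e) {
        denied = e instanceof AccessDeniedException
            || e.getCause() instanceof AccessDeniedException;
      }
      if (!denied) {
        throw new AssertionError("expected AccessDeniedException for " + user.getShortName());
      }
    }
  }
}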
AccessController extends BaseMasterAndRegionObserver implements RegionServerObserver,
AccessControlService.Interface, CoprocessorService, EndpointObserver, BulkLoadObserver { @Override public void preMerge(ObserverContext<RegionServerCoprocessorEnvironment> ctx, Region regionA, Region regionB) throws IOException { requirePermission("mergeRegions", regionA.getTableDesc().getTableName(), null, null, Action.ADMIN); } static boolean isAuthorizationSupported(Configuration conf); static boolean isCellAuthorizationSupported(Configuration conf); Region getRegion(); TableAuthManager getAuthManager(); void requireNamespacePermission(String request, String namespace,
Action... permissions); void requireNamespacePermission(String request, String namespace, TableName tableName,
Map<byte[], ? extends Collection<byte[]>> familyMap, Action... permissions); @Override void start(CoprocessorEnvironment env); @Override void stop(CoprocessorEnvironment env); @Override void preCreateTable(ObserverContext<MasterCoprocessorEnvironment> c,
HTableDescriptor desc, HRegionInfo[] regions); @Override void postCreateTableHandler(final ObserverContext<MasterCoprocessorEnvironment> c,
HTableDescriptor desc, HRegionInfo[] regions); @Override void preDeleteTable(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName); @Override void postDeleteTable(ObserverContext<MasterCoprocessorEnvironment> c,
final TableName tableName); @Override void preTruncateTable(ObserverContext<MasterCoprocessorEnvironment> c,
final TableName tableName); @Override void postTruncateTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
final TableName tableName); @Override void preModifyTable(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName,
HTableDescriptor htd); @Override void postModifyTable(ObserverContext<MasterCoprocessorEnvironment> c,
TableName tableName, final HTableDescriptor htd); @Override void preAddColumn(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName,
HColumnDescriptor column); @Override void preModifyColumn(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName,
HColumnDescriptor descriptor); @Override void preDeleteColumn(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName,
byte[] col); @Override void postDeleteColumn(ObserverContext<MasterCoprocessorEnvironment> c,
final TableName tableName, final byte[] col); @Override void preEnableTable(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName); @Override void preDisableTable(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName); @Override void preAbortProcedure(
ObserverContext<MasterCoprocessorEnvironment> ctx,
final ProcedureExecutor<MasterProcedureEnv> procEnv,
final long procId); @Override void postAbortProcedure(ObserverContext<MasterCoprocessorEnvironment> ctx); @Override void preListProcedures(ObserverContext<MasterCoprocessorEnvironment> ctx); @Override void postListProcedures(
ObserverContext<MasterCoprocessorEnvironment> ctx,
List<ProcedureInfo> procInfoList); @Override void preMove(ObserverContext<MasterCoprocessorEnvironment> c, HRegionInfo region,
ServerName srcServer, ServerName destServer); @Override void preAssign(ObserverContext<MasterCoprocessorEnvironment> c, HRegionInfo regionInfo); @Override void preUnassign(ObserverContext<MasterCoprocessorEnvironment> c, HRegionInfo regionInfo,
boolean force); @Override void preRegionOffline(ObserverContext<MasterCoprocessorEnvironment> c,
HRegionInfo regionInfo); @Override void preBalance(ObserverContext<MasterCoprocessorEnvironment> c); @Override boolean preBalanceSwitch(ObserverContext<MasterCoprocessorEnvironment> c,
boolean newValue); @Override void preShutdown(ObserverContext<MasterCoprocessorEnvironment> c); @Override void preStopMaster(ObserverContext<MasterCoprocessorEnvironment> c); @Override void postStartMaster(ObserverContext<MasterCoprocessorEnvironment> ctx); @Override void preSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final SnapshotDescription snapshot, final HTableDescriptor hTableDescriptor); @Override void preListSnapshot(ObserverContext<MasterCoprocessorEnvironment> ctx,
final SnapshotDescription snapshot); @Override void preCloneSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final SnapshotDescription snapshot, final HTableDescriptor hTableDescriptor); @Override void preRestoreSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final SnapshotDescription snapshot, final HTableDescriptor hTableDescriptor); @Override void preDeleteSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final SnapshotDescription snapshot); @Override void preCreateNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx,
NamespaceDescriptor ns); @Override void preDeleteNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx, String namespace); @Override void postDeleteNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx,
final String namespace); @Override void preModifyNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx,
NamespaceDescriptor ns); @Override void preGetNamespaceDescriptor(ObserverContext<MasterCoprocessorEnvironment> ctx, String namespace); @Override void postListNamespaceDescriptors(ObserverContext<MasterCoprocessorEnvironment> ctx,
List<NamespaceDescriptor> descriptors); @Override void preTableFlush(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final TableName tableName); @Override void preOpen(ObserverContext<RegionCoprocessorEnvironment> e); @Override void postOpen(ObserverContext<RegionCoprocessorEnvironment> c); @Override void postLogReplay(ObserverContext<RegionCoprocessorEnvironment> c); @Override void preFlush(ObserverContext<RegionCoprocessorEnvironment> e); @Override void preSplit(ObserverContext<RegionCoprocessorEnvironment> e); @Override void preSplit(ObserverContext<RegionCoprocessorEnvironment> e,
byte[] splitRow); @Override InternalScanner preCompact(ObserverContext<RegionCoprocessorEnvironment> e,
final Store store, final InternalScanner scanner, final ScanType scanType); @Override void preGetClosestRowBefore(final ObserverContext<RegionCoprocessorEnvironment> c,
final byte [] row, final byte [] family, final Result result); @Override void preGetOp(final ObserverContext<RegionCoprocessorEnvironment> c,
final Get get, final List<Cell> result); @Override boolean preExists(final ObserverContext<RegionCoprocessorEnvironment> c,
final Get get, final boolean exists); @Override void prePut(final ObserverContext<RegionCoprocessorEnvironment> c,
final Put put, final WALEdit edit, final Durability durability); @Override void postPut(final ObserverContext<RegionCoprocessorEnvironment> c,
final Put put, final WALEdit edit, final Durability durability); @Override void preDelete(final ObserverContext<RegionCoprocessorEnvironment> c,
final Delete delete, final WALEdit edit, final Durability durability); @Override void preBatchMutate(ObserverContext<RegionCoprocessorEnvironment> c,
MiniBatchOperationInProgress<Mutation> miniBatchOp); @Override void postDelete(final ObserverContext<RegionCoprocessorEnvironment> c,
final Delete delete, final WALEdit edit, final Durability durability); @Override boolean preCheckAndPut(final ObserverContext<RegionCoprocessorEnvironment> c,
final byte [] row, final byte [] family, final byte [] qualifier,
final CompareFilter.CompareOp compareOp,
final ByteArrayComparable comparator, final Put put,
final boolean result); @Override boolean preCheckAndPutAfterRowLock(final ObserverContext<RegionCoprocessorEnvironment> c,
final byte[] row, final byte[] family, final byte[] qualifier,
final CompareFilter.CompareOp compareOp, final ByteArrayComparable comparator, final Put put,
final boolean result); @Override boolean preCheckAndDelete(final ObserverContext<RegionCoprocessorEnvironment> c,
final byte [] row, final byte [] family, final byte [] qualifier,
final CompareFilter.CompareOp compareOp,
final ByteArrayComparable comparator, final Delete delete,
final boolean result); @Override boolean preCheckAndDeleteAfterRowLock(
final ObserverContext<RegionCoprocessorEnvironment> c, final byte[] row, final byte[] family,
final byte[] qualifier, final CompareFilter.CompareOp compareOp,
final ByteArrayComparable comparator, final Delete delete, final boolean result); @Override long preIncrementColumnValue(final ObserverContext<RegionCoprocessorEnvironment> c,
final byte [] row, final byte [] family, final byte [] qualifier,
final long amount, final boolean writeToWAL); @Override Result preAppend(ObserverContext<RegionCoprocessorEnvironment> c, Append append); @Override Result preAppendAfterRowLock(final ObserverContext<RegionCoprocessorEnvironment> c,
final Append append); @Override Result preIncrement(final ObserverContext<RegionCoprocessorEnvironment> c,
final Increment increment); @Override Result preIncrementAfterRowLock(final ObserverContext<RegionCoprocessorEnvironment> c,
final Increment increment); @Override Cell postMutationBeforeWAL(ObserverContext<RegionCoprocessorEnvironment> ctx,
MutationType opType, Mutation mutation, Cell oldCell, Cell newCell); @Override RegionScanner preScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
final Scan scan, final RegionScanner s); @Override RegionScanner postScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
final Scan scan, final RegionScanner s); @Override boolean preScannerNext(final ObserverContext<RegionCoprocessorEnvironment> c,
final InternalScanner s, final List<Result> result,
final int limit, final boolean hasNext); @Override void preScannerClose(final ObserverContext<RegionCoprocessorEnvironment> c,
final InternalScanner s); @Override void postScannerClose(final ObserverContext<RegionCoprocessorEnvironment> c,
final InternalScanner s); @Override void preBulkLoadHFile(ObserverContext<RegionCoprocessorEnvironment> ctx,
List<Pair<byte[], String>> familyPaths); @Override void prePrepareBulkLoad(ObserverContext<RegionCoprocessorEnvironment> ctx,
PrepareBulkLoadRequest request); @Override void preCleanupBulkLoad(ObserverContext<RegionCoprocessorEnvironment> ctx,
CleanupBulkLoadRequest request); @Override Message preEndpointInvocation(ObserverContext<RegionCoprocessorEnvironment> ctx,
Service service, String methodName, Message request); @Override void postEndpointInvocation(ObserverContext<RegionCoprocessorEnvironment> ctx,
Service service, String methodName, Message request, Message.Builder responseBuilder); @Override void grant(RpcController controller,
AccessControlProtos.GrantRequest request,
RpcCallback<AccessControlProtos.GrantResponse> done); @Override void revoke(RpcController controller,
AccessControlProtos.RevokeRequest request,
RpcCallback<AccessControlProtos.RevokeResponse> done); @Override void getUserPermissions(RpcController controller,
AccessControlProtos.GetUserPermissionsRequest request,
RpcCallback<AccessControlProtos.GetUserPermissionsResponse> done); @Override void checkPermissions(RpcController controller,
AccessControlProtos.CheckPermissionsRequest request,
RpcCallback<AccessControlProtos.CheckPermissionsResponse> done); @Override Service getService(); @Override void preClose(ObserverContext<RegionCoprocessorEnvironment> e, boolean abortRequested); @Override void preStopRegionServer(
ObserverContext<RegionServerCoprocessorEnvironment> env); @Override void preGetTableDescriptors(ObserverContext<MasterCoprocessorEnvironment> ctx,
List<TableName> tableNamesList, List<HTableDescriptor> descriptors,
String regex); @Override void postGetTableDescriptors(ObserverContext<MasterCoprocessorEnvironment> ctx,
List<TableName> tableNamesList, List<HTableDescriptor> descriptors,
String regex); @Override void postGetTableNames(ObserverContext<MasterCoprocessorEnvironment> ctx,
List<HTableDescriptor> descriptors, String regex); @Override void preMerge(ObserverContext<RegionServerCoprocessorEnvironment> ctx, Region regionA,
Region regionB); @Override void postMerge(ObserverContext<RegionServerCoprocessorEnvironment> c, Region regionA,
Region regionB, Region mergedRegion); @Override void preMergeCommit(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
Region regionA, Region regionB, List<Mutation> metaEntries); @Override void postMergeCommit(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
Region regionA, Region regionB, Region mergedRegion); @Override void preRollBackMerge(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
Region regionA, Region regionB); @Override void postRollBackMerge(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
Region regionA, Region regionB); @Override void preRollWALWriterRequest(ObserverContext<RegionServerCoprocessorEnvironment> ctx); @Override void postRollWALWriterRequest(ObserverContext<RegionServerCoprocessorEnvironment> ctx); @Override ReplicationEndpoint postCreateReplicationEndPoint(
ObserverContext<RegionServerCoprocessorEnvironment> ctx, ReplicationEndpoint endpoint); @Override void preReplicateLogEntries(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
List<WALEntry> entries, CellScanner cells); @Override void postReplicateLogEntries(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
List<WALEntry> entries, CellScanner cells); @Override void preSetUserQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final String userName, final Quotas quotas); @Override void preSetUserQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final String userName, final TableName tableName, final Quotas quotas); @Override void preSetUserQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final String userName, final String namespace, final Quotas quotas); @Override void preSetTableQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final TableName tableName, final Quotas quotas); @Override void preSetNamespaceQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final String namespace, final Quotas quotas); } | @Test (timeout=180000) public void testMergeRegions() throws Exception { final TableName tname = TableName.valueOf("testMergeRegions"); createTestTable(tname); try { final List<HRegion> regions = TEST_UTIL.getHBaseCluster().findRegionsForTable(tname); assertTrue("not enough regions: " + regions.size(), regions.size() >= 2); AccessTestAction action = new AccessTestAction() { @Override public Object run() throws Exception { ACCESS_CONTROLLER.preMerge(ObserverContext.createAndPrepare(RSCP_ENV, null), regions.get(0), regions.get(1)); return null; } }; verifyAllowed(action, SUPERUSER, USER_ADMIN, USER_OWNER, USER_GROUP_ADMIN); verifyDenied(action, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ, USER_GROUP_WRITE, USER_GROUP_CREATE); } finally { deleteTable(TEST_UTIL, tname); } } |
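testMergeRegions depends on a createTestTable(...) helper that leaves the table with at least two regions, so findRegionsForTable(...) has a pair to merge. A sketch of such a helper is below; the family name, split key and the exact HBaseTestingUtility overload are assumptions for illustration, not a copy of the test's own helper.

import org.apache.hadoop.hbase.HBaseTestingUtility;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.util.Bytes;

final class TestTableHelperSketch {
  static void createTestTable(HBaseTestingUtility util, TableName tname) throws Exception {
    byte[][] families = new byte[][] { Bytes.toBytes("f1") };
    byte[][] splitKeys = new byte[][] { Bytes.toBytes("m") }; // one split key => two regions
    util.createTable(tname, families, splitKeys);
    util.waitUntilAllRegionsAssigned(tname);
  }
}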
AccessController extends BaseMasterAndRegionObserver implements RegionServerObserver,
AccessControlService.Interface, CoprocessorService, EndpointObserver, BulkLoadObserver { @Override public void preFlush(ObserverContext<RegionCoprocessorEnvironment> e) throws IOException { requirePermission("flush", getTableName(e.getEnvironment()), null, null, Action.ADMIN, Action.CREATE); } static boolean isAuthorizationSupported(Configuration conf); static boolean isCellAuthorizationSupported(Configuration conf); Region getRegion(); TableAuthManager getAuthManager(); void requireNamespacePermission(String request, String namespace,
Action... permissions); void requireNamespacePermission(String request, String namespace, TableName tableName,
Map<byte[], ? extends Collection<byte[]>> familyMap, Action... permissions); @Override void start(CoprocessorEnvironment env); @Override void stop(CoprocessorEnvironment env); @Override void preCreateTable(ObserverContext<MasterCoprocessorEnvironment> c,
HTableDescriptor desc, HRegionInfo[] regions); @Override void postCreateTableHandler(final ObserverContext<MasterCoprocessorEnvironment> c,
HTableDescriptor desc, HRegionInfo[] regions); @Override void preDeleteTable(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName); @Override void postDeleteTable(ObserverContext<MasterCoprocessorEnvironment> c,
final TableName tableName); @Override void preTruncateTable(ObserverContext<MasterCoprocessorEnvironment> c,
final TableName tableName); @Override void postTruncateTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
final TableName tableName); @Override void preModifyTable(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName,
HTableDescriptor htd); @Override void postModifyTable(ObserverContext<MasterCoprocessorEnvironment> c,
TableName tableName, final HTableDescriptor htd); @Override void preAddColumn(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName,
HColumnDescriptor column); @Override void preModifyColumn(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName,
HColumnDescriptor descriptor); @Override void preDeleteColumn(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName,
byte[] col); @Override void postDeleteColumn(ObserverContext<MasterCoprocessorEnvironment> c,
final TableName tableName, final byte[] col); @Override void preEnableTable(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName); @Override void preDisableTable(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName); @Override void preAbortProcedure(
ObserverContext<MasterCoprocessorEnvironment> ctx,
final ProcedureExecutor<MasterProcedureEnv> procEnv,
final long procId); @Override void postAbortProcedure(ObserverContext<MasterCoprocessorEnvironment> ctx); @Override void preListProcedures(ObserverContext<MasterCoprocessorEnvironment> ctx); @Override void postListProcedures(
ObserverContext<MasterCoprocessorEnvironment> ctx,
List<ProcedureInfo> procInfoList); @Override void preMove(ObserverContext<MasterCoprocessorEnvironment> c, HRegionInfo region,
ServerName srcServer, ServerName destServer); @Override void preAssign(ObserverContext<MasterCoprocessorEnvironment> c, HRegionInfo regionInfo); @Override void preUnassign(ObserverContext<MasterCoprocessorEnvironment> c, HRegionInfo regionInfo,
boolean force); @Override void preRegionOffline(ObserverContext<MasterCoprocessorEnvironment> c,
HRegionInfo regionInfo); @Override void preBalance(ObserverContext<MasterCoprocessorEnvironment> c); @Override boolean preBalanceSwitch(ObserverContext<MasterCoprocessorEnvironment> c,
boolean newValue); @Override void preShutdown(ObserverContext<MasterCoprocessorEnvironment> c); @Override void preStopMaster(ObserverContext<MasterCoprocessorEnvironment> c); @Override void postStartMaster(ObserverContext<MasterCoprocessorEnvironment> ctx); @Override void preSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final SnapshotDescription snapshot, final HTableDescriptor hTableDescriptor); @Override void preListSnapshot(ObserverContext<MasterCoprocessorEnvironment> ctx,
final SnapshotDescription snapshot); @Override void preCloneSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final SnapshotDescription snapshot, final HTableDescriptor hTableDescriptor); @Override void preRestoreSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final SnapshotDescription snapshot, final HTableDescriptor hTableDescriptor); @Override void preDeleteSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final SnapshotDescription snapshot); @Override void preCreateNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx,
NamespaceDescriptor ns); @Override void preDeleteNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx, String namespace); @Override void postDeleteNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx,
final String namespace); @Override void preModifyNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx,
NamespaceDescriptor ns); @Override void preGetNamespaceDescriptor(ObserverContext<MasterCoprocessorEnvironment> ctx, String namespace); @Override void postListNamespaceDescriptors(ObserverContext<MasterCoprocessorEnvironment> ctx,
List<NamespaceDescriptor> descriptors); @Override void preTableFlush(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final TableName tableName); @Override void preOpen(ObserverContext<RegionCoprocessorEnvironment> e); @Override void postOpen(ObserverContext<RegionCoprocessorEnvironment> c); @Override void postLogReplay(ObserverContext<RegionCoprocessorEnvironment> c); @Override void preFlush(ObserverContext<RegionCoprocessorEnvironment> e); @Override void preSplit(ObserverContext<RegionCoprocessorEnvironment> e); @Override void preSplit(ObserverContext<RegionCoprocessorEnvironment> e,
byte[] splitRow); @Override InternalScanner preCompact(ObserverContext<RegionCoprocessorEnvironment> e,
final Store store, final InternalScanner scanner, final ScanType scanType); @Override void preGetClosestRowBefore(final ObserverContext<RegionCoprocessorEnvironment> c,
final byte [] row, final byte [] family, final Result result); @Override void preGetOp(final ObserverContext<RegionCoprocessorEnvironment> c,
final Get get, final List<Cell> result); @Override boolean preExists(final ObserverContext<RegionCoprocessorEnvironment> c,
final Get get, final boolean exists); @Override void prePut(final ObserverContext<RegionCoprocessorEnvironment> c,
final Put put, final WALEdit edit, final Durability durability); @Override void postPut(final ObserverContext<RegionCoprocessorEnvironment> c,
final Put put, final WALEdit edit, final Durability durability); @Override void preDelete(final ObserverContext<RegionCoprocessorEnvironment> c,
final Delete delete, final WALEdit edit, final Durability durability); @Override void preBatchMutate(ObserverContext<RegionCoprocessorEnvironment> c,
MiniBatchOperationInProgress<Mutation> miniBatchOp); @Override void postDelete(final ObserverContext<RegionCoprocessorEnvironment> c,
final Delete delete, final WALEdit edit, final Durability durability); @Override boolean preCheckAndPut(final ObserverContext<RegionCoprocessorEnvironment> c,
final byte [] row, final byte [] family, final byte [] qualifier,
final CompareFilter.CompareOp compareOp,
final ByteArrayComparable comparator, final Put put,
final boolean result); @Override boolean preCheckAndPutAfterRowLock(final ObserverContext<RegionCoprocessorEnvironment> c,
final byte[] row, final byte[] family, final byte[] qualifier,
final CompareFilter.CompareOp compareOp, final ByteArrayComparable comparator, final Put put,
final boolean result); @Override boolean preCheckAndDelete(final ObserverContext<RegionCoprocessorEnvironment> c,
final byte [] row, final byte [] family, final byte [] qualifier,
final CompareFilter.CompareOp compareOp,
final ByteArrayComparable comparator, final Delete delete,
final boolean result); @Override boolean preCheckAndDeleteAfterRowLock(
final ObserverContext<RegionCoprocessorEnvironment> c, final byte[] row, final byte[] family,
final byte[] qualifier, final CompareFilter.CompareOp compareOp,
final ByteArrayComparable comparator, final Delete delete, final boolean result); @Override long preIncrementColumnValue(final ObserverContext<RegionCoprocessorEnvironment> c,
final byte [] row, final byte [] family, final byte [] qualifier,
final long amount, final boolean writeToWAL); @Override Result preAppend(ObserverContext<RegionCoprocessorEnvironment> c, Append append); @Override Result preAppendAfterRowLock(final ObserverContext<RegionCoprocessorEnvironment> c,
final Append append); @Override Result preIncrement(final ObserverContext<RegionCoprocessorEnvironment> c,
final Increment increment); @Override Result preIncrementAfterRowLock(final ObserverContext<RegionCoprocessorEnvironment> c,
final Increment increment); @Override Cell postMutationBeforeWAL(ObserverContext<RegionCoprocessorEnvironment> ctx,
MutationType opType, Mutation mutation, Cell oldCell, Cell newCell); @Override RegionScanner preScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
final Scan scan, final RegionScanner s); @Override RegionScanner postScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
final Scan scan, final RegionScanner s); @Override boolean preScannerNext(final ObserverContext<RegionCoprocessorEnvironment> c,
final InternalScanner s, final List<Result> result,
final int limit, final boolean hasNext); @Override void preScannerClose(final ObserverContext<RegionCoprocessorEnvironment> c,
final InternalScanner s); @Override void postScannerClose(final ObserverContext<RegionCoprocessorEnvironment> c,
final InternalScanner s); @Override void preBulkLoadHFile(ObserverContext<RegionCoprocessorEnvironment> ctx,
List<Pair<byte[], String>> familyPaths); @Override void prePrepareBulkLoad(ObserverContext<RegionCoprocessorEnvironment> ctx,
PrepareBulkLoadRequest request); @Override void preCleanupBulkLoad(ObserverContext<RegionCoprocessorEnvironment> ctx,
CleanupBulkLoadRequest request); @Override Message preEndpointInvocation(ObserverContext<RegionCoprocessorEnvironment> ctx,
Service service, String methodName, Message request); @Override void postEndpointInvocation(ObserverContext<RegionCoprocessorEnvironment> ctx,
Service service, String methodName, Message request, Message.Builder responseBuilder); @Override void grant(RpcController controller,
AccessControlProtos.GrantRequest request,
RpcCallback<AccessControlProtos.GrantResponse> done); @Override void revoke(RpcController controller,
AccessControlProtos.RevokeRequest request,
RpcCallback<AccessControlProtos.RevokeResponse> done); @Override void getUserPermissions(RpcController controller,
AccessControlProtos.GetUserPermissionsRequest request,
RpcCallback<AccessControlProtos.GetUserPermissionsResponse> done); @Override void checkPermissions(RpcController controller,
AccessControlProtos.CheckPermissionsRequest request,
RpcCallback<AccessControlProtos.CheckPermissionsResponse> done); @Override Service getService(); @Override void preClose(ObserverContext<RegionCoprocessorEnvironment> e, boolean abortRequested); @Override void preStopRegionServer(
ObserverContext<RegionServerCoprocessorEnvironment> env); @Override void preGetTableDescriptors(ObserverContext<MasterCoprocessorEnvironment> ctx,
List<TableName> tableNamesList, List<HTableDescriptor> descriptors,
String regex); @Override void postGetTableDescriptors(ObserverContext<MasterCoprocessorEnvironment> ctx,
List<TableName> tableNamesList, List<HTableDescriptor> descriptors,
String regex); @Override void postGetTableNames(ObserverContext<MasterCoprocessorEnvironment> ctx,
List<HTableDescriptor> descriptors, String regex); @Override void preMerge(ObserverContext<RegionServerCoprocessorEnvironment> ctx, Region regionA,
Region regionB); @Override void postMerge(ObserverContext<RegionServerCoprocessorEnvironment> c, Region regionA,
Region regionB, Region mergedRegion); @Override void preMergeCommit(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
Region regionA, Region regionB, List<Mutation> metaEntries); @Override void postMergeCommit(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
Region regionA, Region regionB, Region mergedRegion); @Override void preRollBackMerge(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
Region regionA, Region regionB); @Override void postRollBackMerge(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
Region regionA, Region regionB); @Override void preRollWALWriterRequest(ObserverContext<RegionServerCoprocessorEnvironment> ctx); @Override void postRollWALWriterRequest(ObserverContext<RegionServerCoprocessorEnvironment> ctx); @Override ReplicationEndpoint postCreateReplicationEndPoint(
ObserverContext<RegionServerCoprocessorEnvironment> ctx, ReplicationEndpoint endpoint); @Override void preReplicateLogEntries(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
List<WALEntry> entries, CellScanner cells); @Override void postReplicateLogEntries(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
List<WALEntry> entries, CellScanner cells); @Override void preSetUserQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final String userName, final Quotas quotas); @Override void preSetUserQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final String userName, final TableName tableName, final Quotas quotas); @Override void preSetUserQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final String userName, final String namespace, final Quotas quotas); @Override void preSetTableQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final TableName tableName, final Quotas quotas); @Override void preSetNamespaceQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final String namespace, final Quotas quotas); } | @Test (timeout=180000) public void testFlush() throws Exception { AccessTestAction action = new AccessTestAction() { @Override public Object run() throws Exception { ACCESS_CONTROLLER.preFlush(ObserverContext.createAndPrepare(RCP_ENV, null)); return null; } }; verifyAllowed(action, SUPERUSER, USER_ADMIN, USER_OWNER, USER_CREATE, USER_GROUP_CREATE, USER_GROUP_ADMIN); verifyDenied(action, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ, USER_GROUP_WRITE); } |
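testFlush exercises the permission gate shown in the focal preFlush method: requirePermission("flush", table, null, null, ADMIN, CREATE) lets the operation through if the caller holds any one of the listed actions on the table and otherwise raises AccessDeniedException, which is exactly what verifyAllowed/verifyDenied observe. A minimal sketch of that decision follows; TableAuthManagerLike is a hypothetical stand-in for the coprocessor's real TableAuthManager, and the method signature is an illustrative assumption.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.security.AccessDeniedException;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.security.access.Permission.Action;

final class RequirePermissionSketch {
  // Hypothetical interface standing in for the coprocessor's table-permission cache.
  interface TableAuthManagerLike {
    boolean authorize(User user, TableName table, byte[] family, byte[] qualifier, Action action);
  }

  static void requirePermission(User user, TableAuthManagerLike authManager, String request,
      TableName table, byte[] family, byte[] qualifier, Action... permissions)
      throws AccessDeniedException {
    for (Action action : permissions) {
      // Any one of the requested actions is sufficient (e.g. ADMIN or CREATE for "flush").
      if (authManager.authorize(user, table, family, qualifier, action)) {
        return;
      }
    }
    throw new AccessDeniedException("Insufficient permissions for user '" + user.getShortName()
        + "' (request=" + request + ", table=" + table + ")");
  }
}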
AccessController extends BaseMasterAndRegionObserver implements RegionServerObserver,
AccessControlService.Interface, CoprocessorService, EndpointObserver, BulkLoadObserver { @Override public InternalScanner preCompact(ObserverContext<RegionCoprocessorEnvironment> e, final Store store, final InternalScanner scanner, final ScanType scanType) throws IOException { requirePermission("compact", getTableName(e.getEnvironment()), null, null, Action.ADMIN, Action.CREATE); return scanner; } static boolean isAuthorizationSupported(Configuration conf); static boolean isCellAuthorizationSupported(Configuration conf); Region getRegion(); TableAuthManager getAuthManager(); void requireNamespacePermission(String request, String namespace,
Action... permissions); void requireNamespacePermission(String request, String namespace, TableName tableName,
Map<byte[], ? extends Collection<byte[]>> familyMap, Action... permissions); @Override void start(CoprocessorEnvironment env); @Override void stop(CoprocessorEnvironment env); @Override void preCreateTable(ObserverContext<MasterCoprocessorEnvironment> c,
HTableDescriptor desc, HRegionInfo[] regions); @Override void postCreateTableHandler(final ObserverContext<MasterCoprocessorEnvironment> c,
HTableDescriptor desc, HRegionInfo[] regions); @Override void preDeleteTable(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName); @Override void postDeleteTable(ObserverContext<MasterCoprocessorEnvironment> c,
final TableName tableName); @Override void preTruncateTable(ObserverContext<MasterCoprocessorEnvironment> c,
final TableName tableName); @Override void postTruncateTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
final TableName tableName); @Override void preModifyTable(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName,
HTableDescriptor htd); @Override void postModifyTable(ObserverContext<MasterCoprocessorEnvironment> c,
TableName tableName, final HTableDescriptor htd); @Override void preAddColumn(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName,
HColumnDescriptor column); @Override void preModifyColumn(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName,
HColumnDescriptor descriptor); @Override void preDeleteColumn(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName,
byte[] col); @Override void postDeleteColumn(ObserverContext<MasterCoprocessorEnvironment> c,
final TableName tableName, final byte[] col); @Override void preEnableTable(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName); @Override void preDisableTable(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName); @Override void preAbortProcedure(
ObserverContext<MasterCoprocessorEnvironment> ctx,
final ProcedureExecutor<MasterProcedureEnv> procEnv,
final long procId); @Override void postAbortProcedure(ObserverContext<MasterCoprocessorEnvironment> ctx); @Override void preListProcedures(ObserverContext<MasterCoprocessorEnvironment> ctx); @Override void postListProcedures(
ObserverContext<MasterCoprocessorEnvironment> ctx,
List<ProcedureInfo> procInfoList); @Override void preMove(ObserverContext<MasterCoprocessorEnvironment> c, HRegionInfo region,
ServerName srcServer, ServerName destServer); @Override void preAssign(ObserverContext<MasterCoprocessorEnvironment> c, HRegionInfo regionInfo); @Override void preUnassign(ObserverContext<MasterCoprocessorEnvironment> c, HRegionInfo regionInfo,
boolean force); @Override void preRegionOffline(ObserverContext<MasterCoprocessorEnvironment> c,
HRegionInfo regionInfo); @Override void preBalance(ObserverContext<MasterCoprocessorEnvironment> c); @Override boolean preBalanceSwitch(ObserverContext<MasterCoprocessorEnvironment> c,
boolean newValue); @Override void preShutdown(ObserverContext<MasterCoprocessorEnvironment> c); @Override void preStopMaster(ObserverContext<MasterCoprocessorEnvironment> c); @Override void postStartMaster(ObserverContext<MasterCoprocessorEnvironment> ctx); @Override void preSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final SnapshotDescription snapshot, final HTableDescriptor hTableDescriptor); @Override void preListSnapshot(ObserverContext<MasterCoprocessorEnvironment> ctx,
final SnapshotDescription snapshot); @Override void preCloneSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final SnapshotDescription snapshot, final HTableDescriptor hTableDescriptor); @Override void preRestoreSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final SnapshotDescription snapshot, final HTableDescriptor hTableDescriptor); @Override void preDeleteSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final SnapshotDescription snapshot); @Override void preCreateNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx,
NamespaceDescriptor ns); @Override void preDeleteNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx, String namespace); @Override void postDeleteNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx,
final String namespace); @Override void preModifyNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx,
NamespaceDescriptor ns); @Override void preGetNamespaceDescriptor(ObserverContext<MasterCoprocessorEnvironment> ctx, String namespace); @Override void postListNamespaceDescriptors(ObserverContext<MasterCoprocessorEnvironment> ctx,
List<NamespaceDescriptor> descriptors); @Override void preTableFlush(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final TableName tableName); @Override void preOpen(ObserverContext<RegionCoprocessorEnvironment> e); @Override void postOpen(ObserverContext<RegionCoprocessorEnvironment> c); @Override void postLogReplay(ObserverContext<RegionCoprocessorEnvironment> c); @Override void preFlush(ObserverContext<RegionCoprocessorEnvironment> e); @Override void preSplit(ObserverContext<RegionCoprocessorEnvironment> e); @Override void preSplit(ObserverContext<RegionCoprocessorEnvironment> e,
byte[] splitRow); @Override InternalScanner preCompact(ObserverContext<RegionCoprocessorEnvironment> e,
final Store store, final InternalScanner scanner, final ScanType scanType); @Override void preGetClosestRowBefore(final ObserverContext<RegionCoprocessorEnvironment> c,
final byte [] row, final byte [] family, final Result result); @Override void preGetOp(final ObserverContext<RegionCoprocessorEnvironment> c,
final Get get, final List<Cell> result); @Override boolean preExists(final ObserverContext<RegionCoprocessorEnvironment> c,
final Get get, final boolean exists); @Override void prePut(final ObserverContext<RegionCoprocessorEnvironment> c,
final Put put, final WALEdit edit, final Durability durability); @Override void postPut(final ObserverContext<RegionCoprocessorEnvironment> c,
final Put put, final WALEdit edit, final Durability durability); @Override void preDelete(final ObserverContext<RegionCoprocessorEnvironment> c,
final Delete delete, final WALEdit edit, final Durability durability); @Override void preBatchMutate(ObserverContext<RegionCoprocessorEnvironment> c,
MiniBatchOperationInProgress<Mutation> miniBatchOp); @Override void postDelete(final ObserverContext<RegionCoprocessorEnvironment> c,
final Delete delete, final WALEdit edit, final Durability durability); @Override boolean preCheckAndPut(final ObserverContext<RegionCoprocessorEnvironment> c,
final byte [] row, final byte [] family, final byte [] qualifier,
final CompareFilter.CompareOp compareOp,
final ByteArrayComparable comparator, final Put put,
final boolean result); @Override boolean preCheckAndPutAfterRowLock(final ObserverContext<RegionCoprocessorEnvironment> c,
final byte[] row, final byte[] family, final byte[] qualifier,
final CompareFilter.CompareOp compareOp, final ByteArrayComparable comparator, final Put put,
final boolean result); @Override boolean preCheckAndDelete(final ObserverContext<RegionCoprocessorEnvironment> c,
final byte [] row, final byte [] family, final byte [] qualifier,
final CompareFilter.CompareOp compareOp,
final ByteArrayComparable comparator, final Delete delete,
final boolean result); @Override boolean preCheckAndDeleteAfterRowLock(
final ObserverContext<RegionCoprocessorEnvironment> c, final byte[] row, final byte[] family,
final byte[] qualifier, final CompareFilter.CompareOp compareOp,
final ByteArrayComparable comparator, final Delete delete, final boolean result); @Override long preIncrementColumnValue(final ObserverContext<RegionCoprocessorEnvironment> c,
final byte [] row, final byte [] family, final byte [] qualifier,
final long amount, final boolean writeToWAL); @Override Result preAppend(ObserverContext<RegionCoprocessorEnvironment> c, Append append); @Override Result preAppendAfterRowLock(final ObserverContext<RegionCoprocessorEnvironment> c,
final Append append); @Override Result preIncrement(final ObserverContext<RegionCoprocessorEnvironment> c,
final Increment increment); @Override Result preIncrementAfterRowLock(final ObserverContext<RegionCoprocessorEnvironment> c,
final Increment increment); @Override Cell postMutationBeforeWAL(ObserverContext<RegionCoprocessorEnvironment> ctx,
MutationType opType, Mutation mutation, Cell oldCell, Cell newCell); @Override RegionScanner preScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
final Scan scan, final RegionScanner s); @Override RegionScanner postScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
final Scan scan, final RegionScanner s); @Override boolean preScannerNext(final ObserverContext<RegionCoprocessorEnvironment> c,
final InternalScanner s, final List<Result> result,
final int limit, final boolean hasNext); @Override void preScannerClose(final ObserverContext<RegionCoprocessorEnvironment> c,
final InternalScanner s); @Override void postScannerClose(final ObserverContext<RegionCoprocessorEnvironment> c,
final InternalScanner s); @Override void preBulkLoadHFile(ObserverContext<RegionCoprocessorEnvironment> ctx,
List<Pair<byte[], String>> familyPaths); @Override void prePrepareBulkLoad(ObserverContext<RegionCoprocessorEnvironment> ctx,
PrepareBulkLoadRequest request); @Override void preCleanupBulkLoad(ObserverContext<RegionCoprocessorEnvironment> ctx,
CleanupBulkLoadRequest request); @Override Message preEndpointInvocation(ObserverContext<RegionCoprocessorEnvironment> ctx,
Service service, String methodName, Message request); @Override void postEndpointInvocation(ObserverContext<RegionCoprocessorEnvironment> ctx,
Service service, String methodName, Message request, Message.Builder responseBuilder); @Override void grant(RpcController controller,
AccessControlProtos.GrantRequest request,
RpcCallback<AccessControlProtos.GrantResponse> done); @Override void revoke(RpcController controller,
AccessControlProtos.RevokeRequest request,
RpcCallback<AccessControlProtos.RevokeResponse> done); @Override void getUserPermissions(RpcController controller,
AccessControlProtos.GetUserPermissionsRequest request,
RpcCallback<AccessControlProtos.GetUserPermissionsResponse> done); @Override void checkPermissions(RpcController controller,
AccessControlProtos.CheckPermissionsRequest request,
RpcCallback<AccessControlProtos.CheckPermissionsResponse> done); @Override Service getService(); @Override void preClose(ObserverContext<RegionCoprocessorEnvironment> e, boolean abortRequested); @Override void preStopRegionServer(
ObserverContext<RegionServerCoprocessorEnvironment> env); @Override void preGetTableDescriptors(ObserverContext<MasterCoprocessorEnvironment> ctx,
List<TableName> tableNamesList, List<HTableDescriptor> descriptors,
String regex); @Override void postGetTableDescriptors(ObserverContext<MasterCoprocessorEnvironment> ctx,
List<TableName> tableNamesList, List<HTableDescriptor> descriptors,
String regex); @Override void postGetTableNames(ObserverContext<MasterCoprocessorEnvironment> ctx,
List<HTableDescriptor> descriptors, String regex); @Override void preMerge(ObserverContext<RegionServerCoprocessorEnvironment> ctx, Region regionA,
Region regionB); @Override void postMerge(ObserverContext<RegionServerCoprocessorEnvironment> c, Region regionA,
Region regionB, Region mergedRegion); @Override void preMergeCommit(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
Region regionA, Region regionB, List<Mutation> metaEntries); @Override void postMergeCommit(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
Region regionA, Region regionB, Region mergedRegion); @Override void preRollBackMerge(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
Region regionA, Region regionB); @Override void postRollBackMerge(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
Region regionA, Region regionB); @Override void preRollWALWriterRequest(ObserverContext<RegionServerCoprocessorEnvironment> ctx); @Override void postRollWALWriterRequest(ObserverContext<RegionServerCoprocessorEnvironment> ctx); @Override ReplicationEndpoint postCreateReplicationEndPoint(
ObserverContext<RegionServerCoprocessorEnvironment> ctx, ReplicationEndpoint endpoint); @Override void preReplicateLogEntries(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
List<WALEntry> entries, CellScanner cells); @Override void postReplicateLogEntries(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
List<WALEntry> entries, CellScanner cells); @Override void preSetUserQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final String userName, final Quotas quotas); @Override void preSetUserQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final String userName, final TableName tableName, final Quotas quotas); @Override void preSetUserQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final String userName, final String namespace, final Quotas quotas); @Override void preSetTableQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final TableName tableName, final Quotas quotas); @Override void preSetNamespaceQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final String namespace, final Quotas quotas); } | @Test (timeout=180000) public void testCompact() throws Exception { AccessTestAction action = new AccessTestAction() { @Override public Object run() throws Exception { ACCESS_CONTROLLER.preCompact(ObserverContext.createAndPrepare(RCP_ENV, null), null, null, ScanType.COMPACT_RETAIN_DELETES); return null; } }; verifyAllowed(action, SUPERUSER, USER_ADMIN, USER_OWNER, USER_CREATE, USER_GROUP_CREATE, USER_GROUP_ADMIN); verifyDenied(action, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ, USER_GROUP_WRITE); } |
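The flush and compact entries both drive region-level hooks against RCP_ENV, which presumes the AccessController has been wired into the mini cluster with authorization enabled. A sketch of the kind of configuration that wiring requires is below; the property keys are the standard HBase ones, while the helper name and the exact combination are illustrative of, not identical to, the test's static setup.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

final class AccessControllerTestConfigSketch {
  static Configuration create() {
    Configuration conf = HBaseConfiguration.create();
    // Turn on authorization so AccessController.isAuthorizationSupported(conf) returns true.
    conf.setBoolean("hbase.security.authorization", true);
    // Load the AccessController at every level so master, region and region-server hooks fire.
    conf.set("hbase.coprocessor.master.classes",
        "org.apache.hadoop.hbase.security.access.AccessController");
    conf.set("hbase.coprocessor.region.classes",
        "org.apache.hadoop.hbase.security.access.AccessController");
    conf.set("hbase.coprocessor.regionserver.classes",
        "org.apache.hadoop.hbase.security.access.AccessController");
    return conf;
  }
}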
AccessController extends BaseMasterAndRegionObserver implements RegionServerObserver,
AccessControlService.Interface, CoprocessorService, EndpointObserver, BulkLoadObserver { @Override public void getUserPermissions(RpcController controller, AccessControlProtos.GetUserPermissionsRequest request, RpcCallback<AccessControlProtos.GetUserPermissionsResponse> done) { AccessControlProtos.GetUserPermissionsResponse response = null; try { if (aclRegion) { if (!initialized) { throw new CoprocessorException("AccessController not yet initialized"); } List<UserPermission> perms = null; if (request.getType() == AccessControlProtos.Permission.Type.Table) { final TableName table = request.hasTableName() ? ProtobufUtil.toTableName(request.getTableName()) : null; requirePermission("userPermissions", table, null, null, Action.ADMIN); perms = User.runAsLoginUser(new PrivilegedExceptionAction<List<UserPermission>>() { @Override public List<UserPermission> run() throws Exception { return AccessControlLists.getUserTablePermissions(regionEnv.getConfiguration(), table); } }); } else if (request.getType() == AccessControlProtos.Permission.Type.Namespace) { final String namespace = request.getNamespaceName().toStringUtf8(); requireNamespacePermission("userPermissions", namespace, Action.ADMIN); perms = User.runAsLoginUser(new PrivilegedExceptionAction<List<UserPermission>>() { @Override public List<UserPermission> run() throws Exception { return AccessControlLists.getUserNamespacePermissions(regionEnv.getConfiguration(), namespace); } }); } else { requirePermission("userPermissions", Action.ADMIN); perms = User.runAsLoginUser(new PrivilegedExceptionAction<List<UserPermission>>() { @Override public List<UserPermission> run() throws Exception { return AccessControlLists.getUserPermissions(regionEnv.getConfiguration(), null); } }); } response = ResponseConverter.buildGetUserPermissionsResponse(perms); } else { throw new CoprocessorException(AccessController.class, "This method " + "can only execute at " + AccessControlLists.ACL_TABLE_NAME + " table."); } } catch (IOException ioe) { ResponseConverter.setControllerException(controller, ioe); } done.run(response); } static boolean isAuthorizationSupported(Configuration conf); static boolean isCellAuthorizationSupported(Configuration conf); Region getRegion(); TableAuthManager getAuthManager(); void requireNamespacePermission(String request, String namespace,
Action... permissions); void requireNamespacePermission(String request, String namespace, TableName tableName,
Map<byte[], ? extends Collection<byte[]>> familyMap, Action... permissions); @Override void start(CoprocessorEnvironment env); @Override void stop(CoprocessorEnvironment env); @Override void preCreateTable(ObserverContext<MasterCoprocessorEnvironment> c,
HTableDescriptor desc, HRegionInfo[] regions); @Override void postCreateTableHandler(final ObserverContext<MasterCoprocessorEnvironment> c,
HTableDescriptor desc, HRegionInfo[] regions); @Override void preDeleteTable(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName); @Override void postDeleteTable(ObserverContext<MasterCoprocessorEnvironment> c,
final TableName tableName); @Override void preTruncateTable(ObserverContext<MasterCoprocessorEnvironment> c,
final TableName tableName); @Override void postTruncateTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
final TableName tableName); @Override void preModifyTable(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName,
HTableDescriptor htd); @Override void postModifyTable(ObserverContext<MasterCoprocessorEnvironment> c,
TableName tableName, final HTableDescriptor htd); @Override void preAddColumn(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName,
HColumnDescriptor column); @Override void preModifyColumn(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName,
HColumnDescriptor descriptor); @Override void preDeleteColumn(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName,
byte[] col); @Override void postDeleteColumn(ObserverContext<MasterCoprocessorEnvironment> c,
final TableName tableName, final byte[] col); @Override void preEnableTable(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName); @Override void preDisableTable(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName); @Override void preAbortProcedure(
ObserverContext<MasterCoprocessorEnvironment> ctx,
final ProcedureExecutor<MasterProcedureEnv> procEnv,
final long procId); @Override void postAbortProcedure(ObserverContext<MasterCoprocessorEnvironment> ctx); @Override void preListProcedures(ObserverContext<MasterCoprocessorEnvironment> ctx); @Override void postListProcedures(
ObserverContext<MasterCoprocessorEnvironment> ctx,
List<ProcedureInfo> procInfoList); @Override void preMove(ObserverContext<MasterCoprocessorEnvironment> c, HRegionInfo region,
ServerName srcServer, ServerName destServer); @Override void preAssign(ObserverContext<MasterCoprocessorEnvironment> c, HRegionInfo regionInfo); @Override void preUnassign(ObserverContext<MasterCoprocessorEnvironment> c, HRegionInfo regionInfo,
boolean force); @Override void preRegionOffline(ObserverContext<MasterCoprocessorEnvironment> c,
HRegionInfo regionInfo); @Override void preBalance(ObserverContext<MasterCoprocessorEnvironment> c); @Override boolean preBalanceSwitch(ObserverContext<MasterCoprocessorEnvironment> c,
boolean newValue); @Override void preShutdown(ObserverContext<MasterCoprocessorEnvironment> c); @Override void preStopMaster(ObserverContext<MasterCoprocessorEnvironment> c); @Override void postStartMaster(ObserverContext<MasterCoprocessorEnvironment> ctx); @Override void preSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final SnapshotDescription snapshot, final HTableDescriptor hTableDescriptor); @Override void preListSnapshot(ObserverContext<MasterCoprocessorEnvironment> ctx,
final SnapshotDescription snapshot); @Override void preCloneSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final SnapshotDescription snapshot, final HTableDescriptor hTableDescriptor); @Override void preRestoreSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final SnapshotDescription snapshot, final HTableDescriptor hTableDescriptor); @Override void preDeleteSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final SnapshotDescription snapshot); @Override void preCreateNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx,
NamespaceDescriptor ns); @Override void preDeleteNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx, String namespace); @Override void postDeleteNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx,
final String namespace); @Override void preModifyNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx,
NamespaceDescriptor ns); @Override void preGetNamespaceDescriptor(ObserverContext<MasterCoprocessorEnvironment> ctx, String namespace); @Override void postListNamespaceDescriptors(ObserverContext<MasterCoprocessorEnvironment> ctx,
List<NamespaceDescriptor> descriptors); @Override void preTableFlush(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final TableName tableName); @Override void preOpen(ObserverContext<RegionCoprocessorEnvironment> e); @Override void postOpen(ObserverContext<RegionCoprocessorEnvironment> c); @Override void postLogReplay(ObserverContext<RegionCoprocessorEnvironment> c); @Override void preFlush(ObserverContext<RegionCoprocessorEnvironment> e); @Override void preSplit(ObserverContext<RegionCoprocessorEnvironment> e); @Override void preSplit(ObserverContext<RegionCoprocessorEnvironment> e,
byte[] splitRow); @Override InternalScanner preCompact(ObserverContext<RegionCoprocessorEnvironment> e,
final Store store, final InternalScanner scanner, final ScanType scanType); @Override void preGetClosestRowBefore(final ObserverContext<RegionCoprocessorEnvironment> c,
final byte [] row, final byte [] family, final Result result); @Override void preGetOp(final ObserverContext<RegionCoprocessorEnvironment> c,
final Get get, final List<Cell> result); @Override boolean preExists(final ObserverContext<RegionCoprocessorEnvironment> c,
final Get get, final boolean exists); @Override void prePut(final ObserverContext<RegionCoprocessorEnvironment> c,
final Put put, final WALEdit edit, final Durability durability); @Override void postPut(final ObserverContext<RegionCoprocessorEnvironment> c,
final Put put, final WALEdit edit, final Durability durability); @Override void preDelete(final ObserverContext<RegionCoprocessorEnvironment> c,
final Delete delete, final WALEdit edit, final Durability durability); @Override void preBatchMutate(ObserverContext<RegionCoprocessorEnvironment> c,
MiniBatchOperationInProgress<Mutation> miniBatchOp); @Override void postDelete(final ObserverContext<RegionCoprocessorEnvironment> c,
final Delete delete, final WALEdit edit, final Durability durability); @Override boolean preCheckAndPut(final ObserverContext<RegionCoprocessorEnvironment> c,
final byte [] row, final byte [] family, final byte [] qualifier,
final CompareFilter.CompareOp compareOp,
final ByteArrayComparable comparator, final Put put,
final boolean result); @Override boolean preCheckAndPutAfterRowLock(final ObserverContext<RegionCoprocessorEnvironment> c,
final byte[] row, final byte[] family, final byte[] qualifier,
final CompareFilter.CompareOp compareOp, final ByteArrayComparable comparator, final Put put,
final boolean result); @Override boolean preCheckAndDelete(final ObserverContext<RegionCoprocessorEnvironment> c,
final byte [] row, final byte [] family, final byte [] qualifier,
final CompareFilter.CompareOp compareOp,
final ByteArrayComparable comparator, final Delete delete,
final boolean result); @Override boolean preCheckAndDeleteAfterRowLock(
final ObserverContext<RegionCoprocessorEnvironment> c, final byte[] row, final byte[] family,
final byte[] qualifier, final CompareFilter.CompareOp compareOp,
final ByteArrayComparable comparator, final Delete delete, final boolean result); @Override long preIncrementColumnValue(final ObserverContext<RegionCoprocessorEnvironment> c,
final byte [] row, final byte [] family, final byte [] qualifier,
final long amount, final boolean writeToWAL); @Override Result preAppend(ObserverContext<RegionCoprocessorEnvironment> c, Append append); @Override Result preAppendAfterRowLock(final ObserverContext<RegionCoprocessorEnvironment> c,
final Append append); @Override Result preIncrement(final ObserverContext<RegionCoprocessorEnvironment> c,
final Increment increment); @Override Result preIncrementAfterRowLock(final ObserverContext<RegionCoprocessorEnvironment> c,
final Increment increment); @Override Cell postMutationBeforeWAL(ObserverContext<RegionCoprocessorEnvironment> ctx,
MutationType opType, Mutation mutation, Cell oldCell, Cell newCell); @Override RegionScanner preScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
final Scan scan, final RegionScanner s); @Override RegionScanner postScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
final Scan scan, final RegionScanner s); @Override boolean preScannerNext(final ObserverContext<RegionCoprocessorEnvironment> c,
final InternalScanner s, final List<Result> result,
final int limit, final boolean hasNext); @Override void preScannerClose(final ObserverContext<RegionCoprocessorEnvironment> c,
final InternalScanner s); @Override void postScannerClose(final ObserverContext<RegionCoprocessorEnvironment> c,
final InternalScanner s); @Override void preBulkLoadHFile(ObserverContext<RegionCoprocessorEnvironment> ctx,
List<Pair<byte[], String>> familyPaths); @Override void prePrepareBulkLoad(ObserverContext<RegionCoprocessorEnvironment> ctx,
PrepareBulkLoadRequest request); @Override void preCleanupBulkLoad(ObserverContext<RegionCoprocessorEnvironment> ctx,
CleanupBulkLoadRequest request); @Override Message preEndpointInvocation(ObserverContext<RegionCoprocessorEnvironment> ctx,
Service service, String methodName, Message request); @Override void postEndpointInvocation(ObserverContext<RegionCoprocessorEnvironment> ctx,
Service service, String methodName, Message request, Message.Builder responseBuilder); @Override void grant(RpcController controller,
AccessControlProtos.GrantRequest request,
RpcCallback<AccessControlProtos.GrantResponse> done); @Override void revoke(RpcController controller,
AccessControlProtos.RevokeRequest request,
RpcCallback<AccessControlProtos.RevokeResponse> done); @Override void getUserPermissions(RpcController controller,
AccessControlProtos.GetUserPermissionsRequest request,
RpcCallback<AccessControlProtos.GetUserPermissionsResponse> done); @Override void checkPermissions(RpcController controller,
AccessControlProtos.CheckPermissionsRequest request,
RpcCallback<AccessControlProtos.CheckPermissionsResponse> done); @Override Service getService(); @Override void preClose(ObserverContext<RegionCoprocessorEnvironment> e, boolean abortRequested); @Override void preStopRegionServer(
ObserverContext<RegionServerCoprocessorEnvironment> env); @Override void preGetTableDescriptors(ObserverContext<MasterCoprocessorEnvironment> ctx,
List<TableName> tableNamesList, List<HTableDescriptor> descriptors,
String regex); @Override void postGetTableDescriptors(ObserverContext<MasterCoprocessorEnvironment> ctx,
List<TableName> tableNamesList, List<HTableDescriptor> descriptors,
String regex); @Override void postGetTableNames(ObserverContext<MasterCoprocessorEnvironment> ctx,
List<HTableDescriptor> descriptors, String regex); @Override void preMerge(ObserverContext<RegionServerCoprocessorEnvironment> ctx, Region regionA,
Region regionB); @Override void postMerge(ObserverContext<RegionServerCoprocessorEnvironment> c, Region regionA,
Region regionB, Region mergedRegion); @Override void preMergeCommit(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
Region regionA, Region regionB, List<Mutation> metaEntries); @Override void postMergeCommit(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
Region regionA, Region regionB, Region mergedRegion); @Override void preRollBackMerge(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
Region regionA, Region regionB); @Override void postRollBackMerge(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
Region regionA, Region regionB); @Override void preRollWALWriterRequest(ObserverContext<RegionServerCoprocessorEnvironment> ctx); @Override void postRollWALWriterRequest(ObserverContext<RegionServerCoprocessorEnvironment> ctx); @Override ReplicationEndpoint postCreateReplicationEndPoint(
ObserverContext<RegionServerCoprocessorEnvironment> ctx, ReplicationEndpoint endpoint); @Override void preReplicateLogEntries(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
List<WALEntry> entries, CellScanner cells); @Override void postReplicateLogEntries(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
List<WALEntry> entries, CellScanner cells); @Override void preSetUserQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final String userName, final Quotas quotas); @Override void preSetUserQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final String userName, final TableName tableName, final Quotas quotas); @Override void preSetUserQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final String userName, final String namespace, final Quotas quotas); @Override void preSetTableQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final TableName tableName, final Quotas quotas); @Override void preSetNamespaceQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final String namespace, final Quotas quotas); } | @Test (timeout=180000) public void testPermissionList() throws Exception { final TableName tableName = TableName.valueOf("testPermissionList"); final byte[] family1 = Bytes.toBytes("f1"); final byte[] family2 = Bytes.toBytes("f2"); final byte[] qualifier = Bytes.toBytes("q"); Admin admin = TEST_UTIL.getHBaseAdmin(); if (admin.tableExists(tableName)) { deleteTable(TEST_UTIL, tableName); } HTableDescriptor htd = new HTableDescriptor(tableName); htd.addFamily(new HColumnDescriptor(family1)); htd.addFamily(new HColumnDescriptor(family2)); htd.setOwner(USER_OWNER); createTable(TEST_UTIL, htd); try { List<UserPermission> perms; Table acl = systemUserConnection.getTable(AccessControlLists.ACL_TABLE_NAME); try { BlockingRpcChannel service = acl.coprocessorService(tableName.getName()); AccessControlService.BlockingInterface protocol = AccessControlService.newBlockingStub(service); perms = ProtobufUtil.getUserPermissions(null, protocol, tableName); } finally { acl.close(); } UserPermission ownerperm = new UserPermission(Bytes.toBytes(USER_OWNER.getName()), tableName, null, Action.values()); assertTrue("Owner should have all permissions on table", hasFoundUserPermission(ownerperm, perms)); User user = User.createUserForTesting(TEST_UTIL.getConfiguration(), "user", new String[0]); byte[] userName = Bytes.toBytes(user.getShortName()); UserPermission up = new UserPermission(userName, tableName, family1, qualifier, Permission.Action.READ); assertFalse("User should not be granted permission: " + up.toString(), hasFoundUserPermission(up, perms)); grantOnTable(TEST_UTIL, user.getShortName(), tableName, family1, qualifier, Permission.Action.READ); acl = systemUserConnection.getTable(AccessControlLists.ACL_TABLE_NAME); try { BlockingRpcChannel service = acl.coprocessorService(tableName.getName()); AccessControlService.BlockingInterface protocol = AccessControlService.newBlockingStub(service); perms = ProtobufUtil.getUserPermissions(null, protocol, tableName); } finally { acl.close(); } UserPermission upToVerify = new UserPermission(userName, tableName, family1, qualifier, Permission.Action.READ); assertTrue("User should be granted permission: " + upToVerify.toString(), hasFoundUserPermission(upToVerify, perms)); upToVerify = new UserPermission(userName, tableName, family1, qualifier, Permission.Action.WRITE); assertFalse("User should not be granted permission: " + upToVerify.toString(), hasFoundUserPermission(upToVerify, perms)); grantOnTable(TEST_UTIL, user.getShortName(), tableName, family1, qualifier, Permission.Action.WRITE, Permission.Action.READ); acl = systemUserConnection.getTable(AccessControlLists.ACL_TABLE_NAME); try { BlockingRpcChannel service = acl.coprocessorService(tableName.getName()); AccessControlService.BlockingInterface protocol = AccessControlService.newBlockingStub(service); perms = ProtobufUtil.getUserPermissions(null, protocol, tableName); } finally { acl.close(); } upToVerify = new UserPermission(userName, tableName, family1, qualifier, Permission.Action.WRITE, Permission.Action.READ); assertTrue("User should be granted permission: " + upToVerify.toString(), hasFoundUserPermission(upToVerify, perms)); revokeFromTable(TEST_UTIL, user.getShortName(), tableName, family1, qualifier, Permission.Action.WRITE, Permission.Action.READ); acl = systemUserConnection.getTable(AccessControlLists.ACL_TABLE_NAME); try { BlockingRpcChannel service = acl.coprocessorService(tableName.getName()); AccessControlService.BlockingInterface protocol = 
AccessControlService.newBlockingStub(service); perms = ProtobufUtil.getUserPermissions(null, protocol, tableName); } finally { acl.close(); } assertFalse("User should not be granted permission: " + upToVerify.toString(), hasFoundUserPermission(upToVerify, perms)); admin.disableTable(tableName); User newOwner = User.createUserForTesting(conf, "new_owner", new String[] {}); htd.setOwner(newOwner); admin.modifyTable(tableName, htd); acl = systemUserConnection.getTable(AccessControlLists.ACL_TABLE_NAME); try { BlockingRpcChannel service = acl.coprocessorService(tableName.getName()); AccessControlService.BlockingInterface protocol = AccessControlService.newBlockingStub(service); perms = ProtobufUtil.getUserPermissions(null, protocol, tableName); } finally { acl.close(); } UserPermission newOwnerperm = new UserPermission(Bytes.toBytes(newOwner.getName()), tableName, null, Action.values()); assertTrue("New owner should have all permissions on table", hasFoundUserPermission(newOwnerperm, perms)); } finally { deleteTable(TEST_UTIL, tableName); } }
@Test (timeout=180000) public void testGlobalPermissionList() throws Exception { List<UserPermission> perms; Table acl = systemUserConnection.getTable(AccessControlLists.ACL_TABLE_NAME); try { BlockingRpcChannel service = acl.coprocessorService(HConstants.EMPTY_START_ROW); AccessControlService.BlockingInterface protocol = AccessControlService.newBlockingStub(service); perms = ProtobufUtil.getUserPermissions(null, protocol); } finally { acl.close(); } UserPermission adminPerm = new UserPermission(Bytes.toBytes(USER_ADMIN.getShortName()), AccessControlLists.ACL_TABLE_NAME, null, null, Bytes.toBytes("ACRW")); assertTrue("Only global users and user admin has permission on table _acl_ per setup", perms.size() == 5 && hasFoundUserPermission(adminPerm, perms)); }
@Test (timeout=180000) public void testGetNamespacePermission() throws Exception { String namespace = "testGetNamespacePermission"; NamespaceDescriptor desc = NamespaceDescriptor.create(namespace).build(); createNamespace(TEST_UTIL, desc); grantOnNamespace(TEST_UTIL, USER_NONE.getShortName(), namespace, Permission.Action.READ); try { List<UserPermission> namespacePermissions = AccessControlClient.getUserPermissions( systemUserConnection, AccessControlLists.toNamespaceEntry(namespace)); assertTrue(namespacePermissions != null); assertTrue(namespacePermissions.size() == 1); } catch (Throwable thw) { throw new HBaseException(thw); } deleteNamespace(TEST_UTIL, namespace); }
@Test (timeout=180000) public void testTruncatePerms() throws Exception { try { List<UserPermission> existingPerms = AccessControlClient.getUserPermissions( systemUserConnection, TEST_TABLE.getNameAsString()); assertTrue(existingPerms != null); assertTrue(existingPerms.size() > 1); TEST_UTIL.getHBaseAdmin().disableTable(TEST_TABLE); TEST_UTIL.truncateTable(TEST_TABLE); TEST_UTIL.waitTableAvailable(TEST_TABLE); List<UserPermission> perms = AccessControlClient.getUserPermissions( systemUserConnection, TEST_TABLE.getNameAsString()); assertTrue(perms != null); assertEquals(existingPerms.size(), perms.size()); } catch (Throwable e) { throw new HBaseIOException(e); } } |
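// Minimal client-side sketch (not part of the test suite above): listing a table's permissions
// through the AccessControl coprocessor endpoint, using the same coprocessorService +
// newBlockingStub pattern as testPermissionList. The Connection `conn` and TableName
// `tableName` are assumed inputs, and the import locations assume an HBase 1.x layout.
import java.util.List;
import com.google.protobuf.BlockingRpcChannel;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.AccessControlService;
import org.apache.hadoop.hbase.security.access.AccessControlLists;
import org.apache.hadoop.hbase.security.access.UserPermission;

public final class ListTablePermissionsSketch {
  // Lists the UserPermission entries recorded for a table in the ACL table.
  static List<UserPermission> listPermissions(Connection conn, TableName tableName)
      throws Exception {
    Table acl = conn.getTable(AccessControlLists.ACL_TABLE_NAME);
    try {
      BlockingRpcChannel channel = acl.coprocessorService(tableName.getName());
      AccessControlService.BlockingInterface protocol =
          AccessControlService.newBlockingStub(channel);
      // ProtobufUtil translates the protobuf response into UserPermission objects,
      // exactly as the tests above do.
      return ProtobufUtil.getUserPermissions(null, protocol, tableName);
    } finally {
      acl.close();
    }
  }
}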
AccessController extends BaseMasterAndRegionObserver implements RegionServerObserver,
AccessControlService.Interface, CoprocessorService, EndpointObserver, BulkLoadObserver { @Override public void checkPermissions(RpcController controller, AccessControlProtos.CheckPermissionsRequest request, RpcCallback<AccessControlProtos.CheckPermissionsResponse> done) { Permission[] permissions = new Permission[request.getPermissionCount()]; for (int i=0; i < request.getPermissionCount(); i++) { permissions[i] = ProtobufUtil.toPermission(request.getPermission(i)); } AccessControlProtos.CheckPermissionsResponse response = null; try { User user = getActiveUser(); TableName tableName = regionEnv.getRegion().getTableDesc().getTableName(); for (Permission permission : permissions) { if (permission instanceof TablePermission) { TablePermission tperm = (TablePermission) permission; for (Action action : permission.getActions()) { if (!tperm.getTableName().equals(tableName)) { throw new CoprocessorException(AccessController.class, String.format("This method " + "can only execute at the table specified in TablePermission. " + "Table of the region:%s , requested table:%s", tableName, tperm.getTableName())); } Map<byte[], Set<byte[]>> familyMap = new TreeMap<byte[], Set<byte[]>>(Bytes.BYTES_COMPARATOR); if (tperm.getFamily() != null) { if (tperm.getQualifier() != null) { Set<byte[]> qualifiers = Sets.newTreeSet(Bytes.BYTES_COMPARATOR); qualifiers.add(tperm.getQualifier()); familyMap.put(tperm.getFamily(), qualifiers); } else { familyMap.put(tperm.getFamily(), null); } } AuthResult result = permissionGranted("checkPermissions", user, action, regionEnv, familyMap); logResult(result); if (!result.isAllowed()) { throw new AccessDeniedException("Insufficient permissions (table=" + tableName + (familyMap.size() > 0 ? ", family: " + result.toFamilyString() : "") + ", action=" + action.toString() + ")"); } } } else { for (Action action : permission.getActions()) { AuthResult result; if (authManager.authorize(user, action)) { result = AuthResult.allow("checkPermissions", "Global action allowed", user, action, null, null); } else { result = AuthResult.deny("checkPermissions", "Global action denied", user, action, null, null); } logResult(result); if (!result.isAllowed()) { throw new AccessDeniedException("Insufficient permissions (action=" + action.toString() + ")"); } } } } response = AccessControlProtos.CheckPermissionsResponse.getDefaultInstance(); } catch (IOException ioe) { ResponseConverter.setControllerException(controller, ioe); } done.run(response); } static boolean isAuthorizationSupported(Configuration conf); static boolean isCellAuthorizationSupported(Configuration conf); Region getRegion(); TableAuthManager getAuthManager(); void requireNamespacePermission(String request, String namespace,
Action... permissions); void requireNamespacePermission(String request, String namespace, TableName tableName,
Map<byte[], ? extends Collection<byte[]>> familyMap, Action... permissions); @Override void start(CoprocessorEnvironment env); @Override void stop(CoprocessorEnvironment env); @Override void preCreateTable(ObserverContext<MasterCoprocessorEnvironment> c,
HTableDescriptor desc, HRegionInfo[] regions); @Override void postCreateTableHandler(final ObserverContext<MasterCoprocessorEnvironment> c,
HTableDescriptor desc, HRegionInfo[] regions); @Override void preDeleteTable(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName); @Override void postDeleteTable(ObserverContext<MasterCoprocessorEnvironment> c,
final TableName tableName); @Override void preTruncateTable(ObserverContext<MasterCoprocessorEnvironment> c,
final TableName tableName); @Override void postTruncateTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
final TableName tableName); @Override void preModifyTable(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName,
HTableDescriptor htd); @Override void postModifyTable(ObserverContext<MasterCoprocessorEnvironment> c,
TableName tableName, final HTableDescriptor htd); @Override void preAddColumn(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName,
HColumnDescriptor column); @Override void preModifyColumn(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName,
HColumnDescriptor descriptor); @Override void preDeleteColumn(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName,
byte[] col); @Override void postDeleteColumn(ObserverContext<MasterCoprocessorEnvironment> c,
final TableName tableName, final byte[] col); @Override void preEnableTable(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName); @Override void preDisableTable(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName); @Override void preAbortProcedure(
ObserverContext<MasterCoprocessorEnvironment> ctx,
final ProcedureExecutor<MasterProcedureEnv> procEnv,
final long procId); @Override void postAbortProcedure(ObserverContext<MasterCoprocessorEnvironment> ctx); @Override void preListProcedures(ObserverContext<MasterCoprocessorEnvironment> ctx); @Override void postListProcedures(
ObserverContext<MasterCoprocessorEnvironment> ctx,
List<ProcedureInfo> procInfoList); @Override void preMove(ObserverContext<MasterCoprocessorEnvironment> c, HRegionInfo region,
ServerName srcServer, ServerName destServer); @Override void preAssign(ObserverContext<MasterCoprocessorEnvironment> c, HRegionInfo regionInfo); @Override void preUnassign(ObserverContext<MasterCoprocessorEnvironment> c, HRegionInfo regionInfo,
boolean force); @Override void preRegionOffline(ObserverContext<MasterCoprocessorEnvironment> c,
HRegionInfo regionInfo); @Override void preBalance(ObserverContext<MasterCoprocessorEnvironment> c); @Override boolean preBalanceSwitch(ObserverContext<MasterCoprocessorEnvironment> c,
boolean newValue); @Override void preShutdown(ObserverContext<MasterCoprocessorEnvironment> c); @Override void preStopMaster(ObserverContext<MasterCoprocessorEnvironment> c); @Override void postStartMaster(ObserverContext<MasterCoprocessorEnvironment> ctx); @Override void preSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final SnapshotDescription snapshot, final HTableDescriptor hTableDescriptor); @Override void preListSnapshot(ObserverContext<MasterCoprocessorEnvironment> ctx,
final SnapshotDescription snapshot); @Override void preCloneSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final SnapshotDescription snapshot, final HTableDescriptor hTableDescriptor); @Override void preRestoreSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final SnapshotDescription snapshot, final HTableDescriptor hTableDescriptor); @Override void preDeleteSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final SnapshotDescription snapshot); @Override void preCreateNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx,
NamespaceDescriptor ns); @Override void preDeleteNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx, String namespace); @Override void postDeleteNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx,
final String namespace); @Override void preModifyNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx,
NamespaceDescriptor ns); @Override void preGetNamespaceDescriptor(ObserverContext<MasterCoprocessorEnvironment> ctx, String namespace); @Override void postListNamespaceDescriptors(ObserverContext<MasterCoprocessorEnvironment> ctx,
List<NamespaceDescriptor> descriptors); @Override void preTableFlush(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final TableName tableName); @Override void preOpen(ObserverContext<RegionCoprocessorEnvironment> e); @Override void postOpen(ObserverContext<RegionCoprocessorEnvironment> c); @Override void postLogReplay(ObserverContext<RegionCoprocessorEnvironment> c); @Override void preFlush(ObserverContext<RegionCoprocessorEnvironment> e); @Override void preSplit(ObserverContext<RegionCoprocessorEnvironment> e); @Override void preSplit(ObserverContext<RegionCoprocessorEnvironment> e,
byte[] splitRow); @Override InternalScanner preCompact(ObserverContext<RegionCoprocessorEnvironment> e,
final Store store, final InternalScanner scanner, final ScanType scanType); @Override void preGetClosestRowBefore(final ObserverContext<RegionCoprocessorEnvironment> c,
final byte [] row, final byte [] family, final Result result); @Override void preGetOp(final ObserverContext<RegionCoprocessorEnvironment> c,
final Get get, final List<Cell> result); @Override boolean preExists(final ObserverContext<RegionCoprocessorEnvironment> c,
final Get get, final boolean exists); @Override void prePut(final ObserverContext<RegionCoprocessorEnvironment> c,
final Put put, final WALEdit edit, final Durability durability); @Override void postPut(final ObserverContext<RegionCoprocessorEnvironment> c,
final Put put, final WALEdit edit, final Durability durability); @Override void preDelete(final ObserverContext<RegionCoprocessorEnvironment> c,
final Delete delete, final WALEdit edit, final Durability durability); @Override void preBatchMutate(ObserverContext<RegionCoprocessorEnvironment> c,
MiniBatchOperationInProgress<Mutation> miniBatchOp); @Override void postDelete(final ObserverContext<RegionCoprocessorEnvironment> c,
final Delete delete, final WALEdit edit, final Durability durability); @Override boolean preCheckAndPut(final ObserverContext<RegionCoprocessorEnvironment> c,
final byte [] row, final byte [] family, final byte [] qualifier,
final CompareFilter.CompareOp compareOp,
final ByteArrayComparable comparator, final Put put,
final boolean result); @Override boolean preCheckAndPutAfterRowLock(final ObserverContext<RegionCoprocessorEnvironment> c,
final byte[] row, final byte[] family, final byte[] qualifier,
final CompareFilter.CompareOp compareOp, final ByteArrayComparable comparator, final Put put,
final boolean result); @Override boolean preCheckAndDelete(final ObserverContext<RegionCoprocessorEnvironment> c,
final byte [] row, final byte [] family, final byte [] qualifier,
final CompareFilter.CompareOp compareOp,
final ByteArrayComparable comparator, final Delete delete,
final boolean result); @Override boolean preCheckAndDeleteAfterRowLock(
final ObserverContext<RegionCoprocessorEnvironment> c, final byte[] row, final byte[] family,
final byte[] qualifier, final CompareFilter.CompareOp compareOp,
final ByteArrayComparable comparator, final Delete delete, final boolean result); @Override long preIncrementColumnValue(final ObserverContext<RegionCoprocessorEnvironment> c,
final byte [] row, final byte [] family, final byte [] qualifier,
final long amount, final boolean writeToWAL); @Override Result preAppend(ObserverContext<RegionCoprocessorEnvironment> c, Append append); @Override Result preAppendAfterRowLock(final ObserverContext<RegionCoprocessorEnvironment> c,
final Append append); @Override Result preIncrement(final ObserverContext<RegionCoprocessorEnvironment> c,
final Increment increment); @Override Result preIncrementAfterRowLock(final ObserverContext<RegionCoprocessorEnvironment> c,
final Increment increment); @Override Cell postMutationBeforeWAL(ObserverContext<RegionCoprocessorEnvironment> ctx,
MutationType opType, Mutation mutation, Cell oldCell, Cell newCell); @Override RegionScanner preScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
final Scan scan, final RegionScanner s); @Override RegionScanner postScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
final Scan scan, final RegionScanner s); @Override boolean preScannerNext(final ObserverContext<RegionCoprocessorEnvironment> c,
final InternalScanner s, final List<Result> result,
final int limit, final boolean hasNext); @Override void preScannerClose(final ObserverContext<RegionCoprocessorEnvironment> c,
final InternalScanner s); @Override void postScannerClose(final ObserverContext<RegionCoprocessorEnvironment> c,
final InternalScanner s); @Override void preBulkLoadHFile(ObserverContext<RegionCoprocessorEnvironment> ctx,
List<Pair<byte[], String>> familyPaths); @Override void prePrepareBulkLoad(ObserverContext<RegionCoprocessorEnvironment> ctx,
PrepareBulkLoadRequest request); @Override void preCleanupBulkLoad(ObserverContext<RegionCoprocessorEnvironment> ctx,
CleanupBulkLoadRequest request); @Override Message preEndpointInvocation(ObserverContext<RegionCoprocessorEnvironment> ctx,
Service service, String methodName, Message request); @Override void postEndpointInvocation(ObserverContext<RegionCoprocessorEnvironment> ctx,
Service service, String methodName, Message request, Message.Builder responseBuilder); @Override void grant(RpcController controller,
AccessControlProtos.GrantRequest request,
RpcCallback<AccessControlProtos.GrantResponse> done); @Override void revoke(RpcController controller,
AccessControlProtos.RevokeRequest request,
RpcCallback<AccessControlProtos.RevokeResponse> done); @Override void getUserPermissions(RpcController controller,
AccessControlProtos.GetUserPermissionsRequest request,
RpcCallback<AccessControlProtos.GetUserPermissionsResponse> done); @Override void checkPermissions(RpcController controller,
AccessControlProtos.CheckPermissionsRequest request,
RpcCallback<AccessControlProtos.CheckPermissionsResponse> done); @Override Service getService(); @Override void preClose(ObserverContext<RegionCoprocessorEnvironment> e, boolean abortRequested); @Override void preStopRegionServer(
ObserverContext<RegionServerCoprocessorEnvironment> env); @Override void preGetTableDescriptors(ObserverContext<MasterCoprocessorEnvironment> ctx,
List<TableName> tableNamesList, List<HTableDescriptor> descriptors,
String regex); @Override void postGetTableDescriptors(ObserverContext<MasterCoprocessorEnvironment> ctx,
List<TableName> tableNamesList, List<HTableDescriptor> descriptors,
String regex); @Override void postGetTableNames(ObserverContext<MasterCoprocessorEnvironment> ctx,
List<HTableDescriptor> descriptors, String regex); @Override void preMerge(ObserverContext<RegionServerCoprocessorEnvironment> ctx, Region regionA,
Region regionB); @Override void postMerge(ObserverContext<RegionServerCoprocessorEnvironment> c, Region regionA,
Region regionB, Region mergedRegion); @Override void preMergeCommit(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
Region regionA, Region regionB, List<Mutation> metaEntries); @Override void postMergeCommit(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
Region regionA, Region regionB, Region mergedRegion); @Override void preRollBackMerge(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
Region regionA, Region regionB); @Override void postRollBackMerge(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
Region regionA, Region regionB); @Override void preRollWALWriterRequest(ObserverContext<RegionServerCoprocessorEnvironment> ctx); @Override void postRollWALWriterRequest(ObserverContext<RegionServerCoprocessorEnvironment> ctx); @Override ReplicationEndpoint postCreateReplicationEndPoint(
ObserverContext<RegionServerCoprocessorEnvironment> ctx, ReplicationEndpoint endpoint); @Override void preReplicateLogEntries(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
List<WALEntry> entries, CellScanner cells); @Override void postReplicateLogEntries(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
List<WALEntry> entries, CellScanner cells); @Override void preSetUserQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final String userName, final Quotas quotas); @Override void preSetUserQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final String userName, final TableName tableName, final Quotas quotas); @Override void preSetUserQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final String userName, final String namespace, final Quotas quotas); @Override void preSetTableQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final TableName tableName, final Quotas quotas); @Override void preSetNamespaceQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final String namespace, final Quotas quotas); } | @Test (timeout=180000) public void testCheckPermissions() throws Exception { AccessTestAction globalAdmin = new AccessTestAction() { @Override public Void run() throws Exception { checkGlobalPerms(TEST_UTIL, Permission.Action.ADMIN); return null; } }; verifyGlobal(globalAdmin); AccessTestAction globalReadWrite = new AccessTestAction() { @Override public Void run() throws Exception { checkGlobalPerms(TEST_UTIL, Permission.Action.READ, Permission.Action.WRITE); return null; } }; verifyGlobal(globalReadWrite); final byte[] TEST_Q1 = Bytes.toBytes("q1"); final byte[] TEST_Q2 = Bytes.toBytes("q2"); User userTable = User.createUserForTesting(conf, "user_check_perms_table", new String[0]); User userColumn = User.createUserForTesting(conf, "user_check_perms_family", new String[0]); User userQualifier = User.createUserForTesting(conf, "user_check_perms_q", new String[0]); grantOnTable(TEST_UTIL, userTable.getShortName(), TEST_TABLE, null, null, Permission.Action.READ); grantOnTable(TEST_UTIL, userColumn.getShortName(), TEST_TABLE, TEST_FAMILY, null, Permission.Action.READ); grantOnTable(TEST_UTIL, userQualifier.getShortName(), TEST_TABLE, TEST_FAMILY, TEST_Q1, Permission.Action.READ); try { AccessTestAction tableRead = new AccessTestAction() { @Override public Void run() throws Exception { checkTablePerms(TEST_UTIL, TEST_TABLE, null, null, Permission.Action.READ); return null; } }; AccessTestAction columnRead = new AccessTestAction() { @Override public Void run() throws Exception { checkTablePerms(TEST_UTIL, TEST_TABLE, TEST_FAMILY, null, Permission.Action.READ); return null; } }; AccessTestAction qualifierRead = new AccessTestAction() { @Override public Void run() throws Exception { checkTablePerms(TEST_UTIL, TEST_TABLE, TEST_FAMILY, TEST_Q1, Permission.Action.READ); return null; } }; AccessTestAction multiQualifierRead = new AccessTestAction() { @Override public Void run() throws Exception { checkTablePerms(TEST_UTIL, TEST_TABLE, new Permission[] { new TablePermission(TEST_TABLE, TEST_FAMILY, TEST_Q1, Permission.Action.READ), new TablePermission(TEST_TABLE, TEST_FAMILY, TEST_Q2, Permission.Action.READ), }); return null; } }; AccessTestAction globalAndTableRead = new AccessTestAction() { @Override public Void run() throws Exception { checkTablePerms(TEST_UTIL, TEST_TABLE, new Permission[] { new Permission(Permission.Action.READ), new TablePermission(TEST_TABLE, null, (byte[]) null, Permission.Action.READ), }); return null; } }; AccessTestAction noCheck = new AccessTestAction() { @Override public Void run() throws Exception { checkTablePerms(TEST_UTIL, TEST_TABLE, new Permission[0]); return null; } }; verifyAllowed(tableRead, SUPERUSER, userTable); verifyDenied(tableRead, userColumn, userQualifier); verifyAllowed(columnRead, SUPERUSER, userTable, userColumn); verifyDenied(columnRead, userQualifier); verifyAllowed(qualifierRead, SUPERUSER, userTable, userColumn, userQualifier); verifyAllowed(multiQualifierRead, SUPERUSER, userTable, userColumn); verifyDenied(multiQualifierRead, userQualifier); verifyAllowed(globalAndTableRead, SUPERUSER); verifyDenied(globalAndTableRead, userTable, userColumn, userQualifier); verifyAllowed(noCheck, SUPERUSER, userTable, userColumn, userQualifier); AccessTestAction familyReadWrite = new AccessTestAction() { @Override public Void run() throws Exception { checkTablePerms(TEST_UTIL, TEST_TABLE, TEST_FAMILY, null, Permission.Action.READ, Permission.Action.WRITE); return null; } }; verifyAllowed(familyReadWrite, SUPERUSER, 
USER_OWNER, USER_CREATE, USER_RW); verifyDenied(familyReadWrite, USER_NONE, USER_RO); CheckPermissionsRequest checkRequest = CheckPermissionsRequest .newBuilder() .addPermission( AccessControlProtos.Permission .newBuilder() .setType(AccessControlProtos.Permission.Type.Table) .setTablePermission( AccessControlProtos.TablePermission.newBuilder() .setTableName(ProtobufUtil.toProtoTableName(TEST_TABLE)) .addAction(AccessControlProtos.Permission.Action.CREATE))).build(); Table acl = systemUserConnection.getTable(AccessControlLists.ACL_TABLE_NAME); try { BlockingRpcChannel channel = acl.coprocessorService(new byte[0]); AccessControlService.BlockingInterface protocol = AccessControlService.newBlockingStub(channel); try { protocol.checkPermissions(null, checkRequest); fail("this should have thrown CoprocessorException"); } catch (ServiceException ex) { } } finally { acl.close(); } } finally { revokeFromTable(TEST_UTIL, userTable.getShortName(), TEST_TABLE, null, null, Permission.Action.READ); revokeFromTable(TEST_UTIL, userColumn.getShortName(), TEST_TABLE, TEST_FAMILY, null, Permission.Action.READ); revokeFromTable(TEST_UTIL, userQualifier.getShortName(), TEST_TABLE, TEST_FAMILY, TEST_Q1, Permission.Action.READ); } } |
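// Hedged companion sketch to testCheckPermissions: the same CheckPermissionsRequest pattern,
// but asking whether the calling user holds READ on a specific family/qualifier rather than a
// table-level CREATE. The Table handle `acl` (on the ACL table), TableName `tableName`, and the
// `family`/`qualifier` byte arrays are assumed inputs; a denied check surfaces as a
// ServiceException from the blocking stub, which is what the negative case above relies on.
import com.google.protobuf.BlockingRpcChannel;
import com.google.protobuf.ByteString;
import com.google.protobuf.ServiceException;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos;
import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.AccessControlService;
import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.CheckPermissionsRequest;

final class CheckReadPermissionSketch {
  // Returns true if the current user may READ family:qualifier of the given table.
  // Simplification: any ServiceException is treated as a denial here.
  static boolean canRead(Table acl, TableName tableName, byte[] family, byte[] qualifier)
      throws Exception {
    CheckPermissionsRequest request = CheckPermissionsRequest.newBuilder()
        .addPermission(AccessControlProtos.Permission.newBuilder()
            .setType(AccessControlProtos.Permission.Type.Table)
            .setTablePermission(AccessControlProtos.TablePermission.newBuilder()
                .setTableName(ProtobufUtil.toProtoTableName(tableName))
                .setFamily(ByteString.copyFrom(family))
                .setQualifier(ByteString.copyFrom(qualifier))
                .addAction(AccessControlProtos.Permission.Action.READ)))
        .build();
    BlockingRpcChannel channel = acl.coprocessorService(new byte[0]);
    AccessControlService.BlockingInterface protocol =
        AccessControlService.newBlockingStub(channel);
    try {
      protocol.checkPermissions(null, request);
      return true;
    } catch (ServiceException denied) {
      return false;
    }
  }
}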
AccessController extends BaseMasterAndRegionObserver implements RegionServerObserver,
AccessControlService.Interface, CoprocessorService, EndpointObserver, BulkLoadObserver { @Override public void preStopRegionServer( ObserverContext<RegionServerCoprocessorEnvironment> env) throws IOException { requirePermission("preStopRegionServer", Action.ADMIN); } static boolean isAuthorizationSupported(Configuration conf); static boolean isCellAuthorizationSupported(Configuration conf); Region getRegion(); TableAuthManager getAuthManager(); void requireNamespacePermission(String request, String namespace,
Action... permissions); void requireNamespacePermission(String request, String namespace, TableName tableName,
Map<byte[], ? extends Collection<byte[]>> familyMap, Action... permissions); @Override void start(CoprocessorEnvironment env); @Override void stop(CoprocessorEnvironment env); @Override void preCreateTable(ObserverContext<MasterCoprocessorEnvironment> c,
HTableDescriptor desc, HRegionInfo[] regions); @Override void postCreateTableHandler(final ObserverContext<MasterCoprocessorEnvironment> c,
HTableDescriptor desc, HRegionInfo[] regions); @Override void preDeleteTable(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName); @Override void postDeleteTable(ObserverContext<MasterCoprocessorEnvironment> c,
final TableName tableName); @Override void preTruncateTable(ObserverContext<MasterCoprocessorEnvironment> c,
final TableName tableName); @Override void postTruncateTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
final TableName tableName); @Override void preModifyTable(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName,
HTableDescriptor htd); @Override void postModifyTable(ObserverContext<MasterCoprocessorEnvironment> c,
TableName tableName, final HTableDescriptor htd); @Override void preAddColumn(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName,
HColumnDescriptor column); @Override void preModifyColumn(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName,
HColumnDescriptor descriptor); @Override void preDeleteColumn(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName,
byte[] col); @Override void postDeleteColumn(ObserverContext<MasterCoprocessorEnvironment> c,
final TableName tableName, final byte[] col); @Override void preEnableTable(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName); @Override void preDisableTable(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName); @Override void preAbortProcedure(
ObserverContext<MasterCoprocessorEnvironment> ctx,
final ProcedureExecutor<MasterProcedureEnv> procEnv,
final long procId); @Override void postAbortProcedure(ObserverContext<MasterCoprocessorEnvironment> ctx); @Override void preListProcedures(ObserverContext<MasterCoprocessorEnvironment> ctx); @Override void postListProcedures(
ObserverContext<MasterCoprocessorEnvironment> ctx,
List<ProcedureInfo> procInfoList); @Override void preMove(ObserverContext<MasterCoprocessorEnvironment> c, HRegionInfo region,
ServerName srcServer, ServerName destServer); @Override void preAssign(ObserverContext<MasterCoprocessorEnvironment> c, HRegionInfo regionInfo); @Override void preUnassign(ObserverContext<MasterCoprocessorEnvironment> c, HRegionInfo regionInfo,
boolean force); @Override void preRegionOffline(ObserverContext<MasterCoprocessorEnvironment> c,
HRegionInfo regionInfo); @Override void preBalance(ObserverContext<MasterCoprocessorEnvironment> c); @Override boolean preBalanceSwitch(ObserverContext<MasterCoprocessorEnvironment> c,
boolean newValue); @Override void preShutdown(ObserverContext<MasterCoprocessorEnvironment> c); @Override void preStopMaster(ObserverContext<MasterCoprocessorEnvironment> c); @Override void postStartMaster(ObserverContext<MasterCoprocessorEnvironment> ctx); @Override void preSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final SnapshotDescription snapshot, final HTableDescriptor hTableDescriptor); @Override void preListSnapshot(ObserverContext<MasterCoprocessorEnvironment> ctx,
final SnapshotDescription snapshot); @Override void preCloneSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final SnapshotDescription snapshot, final HTableDescriptor hTableDescriptor); @Override void preRestoreSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final SnapshotDescription snapshot, final HTableDescriptor hTableDescriptor); @Override void preDeleteSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final SnapshotDescription snapshot); @Override void preCreateNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx,
NamespaceDescriptor ns); @Override void preDeleteNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx, String namespace); @Override void postDeleteNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx,
final String namespace); @Override void preModifyNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx,
NamespaceDescriptor ns); @Override void preGetNamespaceDescriptor(ObserverContext<MasterCoprocessorEnvironment> ctx, String namespace); @Override void postListNamespaceDescriptors(ObserverContext<MasterCoprocessorEnvironment> ctx,
List<NamespaceDescriptor> descriptors); @Override void preTableFlush(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final TableName tableName); @Override void preOpen(ObserverContext<RegionCoprocessorEnvironment> e); @Override void postOpen(ObserverContext<RegionCoprocessorEnvironment> c); @Override void postLogReplay(ObserverContext<RegionCoprocessorEnvironment> c); @Override void preFlush(ObserverContext<RegionCoprocessorEnvironment> e); @Override void preSplit(ObserverContext<RegionCoprocessorEnvironment> e); @Override void preSplit(ObserverContext<RegionCoprocessorEnvironment> e,
byte[] splitRow); @Override InternalScanner preCompact(ObserverContext<RegionCoprocessorEnvironment> e,
final Store store, final InternalScanner scanner, final ScanType scanType); @Override void preGetClosestRowBefore(final ObserverContext<RegionCoprocessorEnvironment> c,
final byte [] row, final byte [] family, final Result result); @Override void preGetOp(final ObserverContext<RegionCoprocessorEnvironment> c,
final Get get, final List<Cell> result); @Override boolean preExists(final ObserverContext<RegionCoprocessorEnvironment> c,
final Get get, final boolean exists); @Override void prePut(final ObserverContext<RegionCoprocessorEnvironment> c,
final Put put, final WALEdit edit, final Durability durability); @Override void postPut(final ObserverContext<RegionCoprocessorEnvironment> c,
final Put put, final WALEdit edit, final Durability durability); @Override void preDelete(final ObserverContext<RegionCoprocessorEnvironment> c,
final Delete delete, final WALEdit edit, final Durability durability); @Override void preBatchMutate(ObserverContext<RegionCoprocessorEnvironment> c,
MiniBatchOperationInProgress<Mutation> miniBatchOp); @Override void postDelete(final ObserverContext<RegionCoprocessorEnvironment> c,
final Delete delete, final WALEdit edit, final Durability durability); @Override boolean preCheckAndPut(final ObserverContext<RegionCoprocessorEnvironment> c,
final byte [] row, final byte [] family, final byte [] qualifier,
final CompareFilter.CompareOp compareOp,
final ByteArrayComparable comparator, final Put put,
final boolean result); @Override boolean preCheckAndPutAfterRowLock(final ObserverContext<RegionCoprocessorEnvironment> c,
final byte[] row, final byte[] family, final byte[] qualifier,
final CompareFilter.CompareOp compareOp, final ByteArrayComparable comparator, final Put put,
final boolean result); @Override boolean preCheckAndDelete(final ObserverContext<RegionCoprocessorEnvironment> c,
final byte [] row, final byte [] family, final byte [] qualifier,
final CompareFilter.CompareOp compareOp,
final ByteArrayComparable comparator, final Delete delete,
final boolean result); @Override boolean preCheckAndDeleteAfterRowLock(
final ObserverContext<RegionCoprocessorEnvironment> c, final byte[] row, final byte[] family,
final byte[] qualifier, final CompareFilter.CompareOp compareOp,
final ByteArrayComparable comparator, final Delete delete, final boolean result); @Override long preIncrementColumnValue(final ObserverContext<RegionCoprocessorEnvironment> c,
final byte [] row, final byte [] family, final byte [] qualifier,
final long amount, final boolean writeToWAL); @Override Result preAppend(ObserverContext<RegionCoprocessorEnvironment> c, Append append); @Override Result preAppendAfterRowLock(final ObserverContext<RegionCoprocessorEnvironment> c,
final Append append); @Override Result preIncrement(final ObserverContext<RegionCoprocessorEnvironment> c,
final Increment increment); @Override Result preIncrementAfterRowLock(final ObserverContext<RegionCoprocessorEnvironment> c,
final Increment increment); @Override Cell postMutationBeforeWAL(ObserverContext<RegionCoprocessorEnvironment> ctx,
MutationType opType, Mutation mutation, Cell oldCell, Cell newCell); @Override RegionScanner preScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
final Scan scan, final RegionScanner s); @Override RegionScanner postScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
final Scan scan, final RegionScanner s); @Override boolean preScannerNext(final ObserverContext<RegionCoprocessorEnvironment> c,
final InternalScanner s, final List<Result> result,
final int limit, final boolean hasNext); @Override void preScannerClose(final ObserverContext<RegionCoprocessorEnvironment> c,
final InternalScanner s); @Override void postScannerClose(final ObserverContext<RegionCoprocessorEnvironment> c,
final InternalScanner s); @Override void preBulkLoadHFile(ObserverContext<RegionCoprocessorEnvironment> ctx,
List<Pair<byte[], String>> familyPaths); @Override void prePrepareBulkLoad(ObserverContext<RegionCoprocessorEnvironment> ctx,
PrepareBulkLoadRequest request); @Override void preCleanupBulkLoad(ObserverContext<RegionCoprocessorEnvironment> ctx,
CleanupBulkLoadRequest request); @Override Message preEndpointInvocation(ObserverContext<RegionCoprocessorEnvironment> ctx,
Service service, String methodName, Message request); @Override void postEndpointInvocation(ObserverContext<RegionCoprocessorEnvironment> ctx,
Service service, String methodName, Message request, Message.Builder responseBuilder); @Override void grant(RpcController controller,
AccessControlProtos.GrantRequest request,
RpcCallback<AccessControlProtos.GrantResponse> done); @Override void revoke(RpcController controller,
AccessControlProtos.RevokeRequest request,
RpcCallback<AccessControlProtos.RevokeResponse> done); @Override void getUserPermissions(RpcController controller,
AccessControlProtos.GetUserPermissionsRequest request,
RpcCallback<AccessControlProtos.GetUserPermissionsResponse> done); @Override void checkPermissions(RpcController controller,
AccessControlProtos.CheckPermissionsRequest request,
RpcCallback<AccessControlProtos.CheckPermissionsResponse> done); @Override Service getService(); @Override void preClose(ObserverContext<RegionCoprocessorEnvironment> e, boolean abortRequested); @Override void preStopRegionServer(
ObserverContext<RegionServerCoprocessorEnvironment> env); @Override void preGetTableDescriptors(ObserverContext<MasterCoprocessorEnvironment> ctx,
List<TableName> tableNamesList, List<HTableDescriptor> descriptors,
String regex); @Override void postGetTableDescriptors(ObserverContext<MasterCoprocessorEnvironment> ctx,
List<TableName> tableNamesList, List<HTableDescriptor> descriptors,
String regex); @Override void postGetTableNames(ObserverContext<MasterCoprocessorEnvironment> ctx,
List<HTableDescriptor> descriptors, String regex); @Override void preMerge(ObserverContext<RegionServerCoprocessorEnvironment> ctx, Region regionA,
Region regionB); @Override void postMerge(ObserverContext<RegionServerCoprocessorEnvironment> c, Region regionA,
Region regionB, Region mergedRegion); @Override void preMergeCommit(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
Region regionA, Region regionB, List<Mutation> metaEntries); @Override void postMergeCommit(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
Region regionA, Region regionB, Region mergedRegion); @Override void preRollBackMerge(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
Region regionA, Region regionB); @Override void postRollBackMerge(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
Region regionA, Region regionB); @Override void preRollWALWriterRequest(ObserverContext<RegionServerCoprocessorEnvironment> ctx); @Override void postRollWALWriterRequest(ObserverContext<RegionServerCoprocessorEnvironment> ctx); @Override ReplicationEndpoint postCreateReplicationEndPoint(
ObserverContext<RegionServerCoprocessorEnvironment> ctx, ReplicationEndpoint endpoint); @Override void preReplicateLogEntries(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
List<WALEntry> entries, CellScanner cells); @Override void postReplicateLogEntries(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
List<WALEntry> entries, CellScanner cells); @Override void preSetUserQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final String userName, final Quotas quotas); @Override void preSetUserQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final String userName, final TableName tableName, final Quotas quotas); @Override void preSetUserQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final String userName, final String namespace, final Quotas quotas); @Override void preSetTableQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final TableName tableName, final Quotas quotas); @Override void preSetNamespaceQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final String namespace, final Quotas quotas); } | @Test (timeout=180000) public void testStopRegionServer() throws Exception { AccessTestAction action = new AccessTestAction() { @Override public Object run() throws Exception { ACCESS_CONTROLLER.preStopRegionServer(ObserverContext.createAndPrepare(RSCP_ENV, null)); return null; } }; verifyAllowed(action, SUPERUSER, USER_ADMIN, USER_GROUP_ADMIN); verifyDenied(action, USER_CREATE, USER_OWNER, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ, USER_GROUP_WRITE, USER_GROUP_CREATE); } |
AccessController extends BaseMasterAndRegionObserver implements RegionServerObserver,
AccessControlService.Interface, CoprocessorService, EndpointObserver, BulkLoadObserver { @Override public void preRollWALWriterRequest(ObserverContext<RegionServerCoprocessorEnvironment> ctx) throws IOException { requirePermission("preRollLogWriterRequest", Permission.Action.ADMIN); } static boolean isAuthorizationSupported(Configuration conf); static boolean isCellAuthorizationSupported(Configuration conf); Region getRegion(); TableAuthManager getAuthManager(); void requireNamespacePermission(String request, String namespace,
Action... permissions); void requireNamespacePermission(String request, String namespace, TableName tableName,
Map<byte[], ? extends Collection<byte[]>> familyMap, Action... permissions); @Override void start(CoprocessorEnvironment env); @Override void stop(CoprocessorEnvironment env); @Override void preCreateTable(ObserverContext<MasterCoprocessorEnvironment> c,
HTableDescriptor desc, HRegionInfo[] regions); @Override void postCreateTableHandler(final ObserverContext<MasterCoprocessorEnvironment> c,
HTableDescriptor desc, HRegionInfo[] regions); @Override void preDeleteTable(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName); @Override void postDeleteTable(ObserverContext<MasterCoprocessorEnvironment> c,
final TableName tableName); @Override void preTruncateTable(ObserverContext<MasterCoprocessorEnvironment> c,
final TableName tableName); @Override void postTruncateTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
final TableName tableName); @Override void preModifyTable(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName,
HTableDescriptor htd); @Override void postModifyTable(ObserverContext<MasterCoprocessorEnvironment> c,
TableName tableName, final HTableDescriptor htd); @Override void preAddColumn(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName,
HColumnDescriptor column); @Override void preModifyColumn(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName,
HColumnDescriptor descriptor); @Override void preDeleteColumn(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName,
byte[] col); @Override void postDeleteColumn(ObserverContext<MasterCoprocessorEnvironment> c,
final TableName tableName, final byte[] col); @Override void preEnableTable(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName); @Override void preDisableTable(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName); @Override void preAbortProcedure(
ObserverContext<MasterCoprocessorEnvironment> ctx,
final ProcedureExecutor<MasterProcedureEnv> procEnv,
final long procId); @Override void postAbortProcedure(ObserverContext<MasterCoprocessorEnvironment> ctx); @Override void preListProcedures(ObserverContext<MasterCoprocessorEnvironment> ctx); @Override void postListProcedures(
ObserverContext<MasterCoprocessorEnvironment> ctx,
List<ProcedureInfo> procInfoList); @Override void preMove(ObserverContext<MasterCoprocessorEnvironment> c, HRegionInfo region,
ServerName srcServer, ServerName destServer); @Override void preAssign(ObserverContext<MasterCoprocessorEnvironment> c, HRegionInfo regionInfo); @Override void preUnassign(ObserverContext<MasterCoprocessorEnvironment> c, HRegionInfo regionInfo,
boolean force); @Override void preRegionOffline(ObserverContext<MasterCoprocessorEnvironment> c,
HRegionInfo regionInfo); @Override void preBalance(ObserverContext<MasterCoprocessorEnvironment> c); @Override boolean preBalanceSwitch(ObserverContext<MasterCoprocessorEnvironment> c,
boolean newValue); @Override void preShutdown(ObserverContext<MasterCoprocessorEnvironment> c); @Override void preStopMaster(ObserverContext<MasterCoprocessorEnvironment> c); @Override void postStartMaster(ObserverContext<MasterCoprocessorEnvironment> ctx); @Override void preSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final SnapshotDescription snapshot, final HTableDescriptor hTableDescriptor); @Override void preListSnapshot(ObserverContext<MasterCoprocessorEnvironment> ctx,
final SnapshotDescription snapshot); @Override void preCloneSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final SnapshotDescription snapshot, final HTableDescriptor hTableDescriptor); @Override void preRestoreSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final SnapshotDescription snapshot, final HTableDescriptor hTableDescriptor); @Override void preDeleteSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final SnapshotDescription snapshot); @Override void preCreateNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx,
NamespaceDescriptor ns); @Override void preDeleteNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx, String namespace); @Override void postDeleteNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx,
final String namespace); @Override void preModifyNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx,
NamespaceDescriptor ns); @Override void preGetNamespaceDescriptor(ObserverContext<MasterCoprocessorEnvironment> ctx, String namespace); @Override void postListNamespaceDescriptors(ObserverContext<MasterCoprocessorEnvironment> ctx,
List<NamespaceDescriptor> descriptors); @Override void preTableFlush(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final TableName tableName); @Override void preOpen(ObserverContext<RegionCoprocessorEnvironment> e); @Override void postOpen(ObserverContext<RegionCoprocessorEnvironment> c); @Override void postLogReplay(ObserverContext<RegionCoprocessorEnvironment> c); @Override void preFlush(ObserverContext<RegionCoprocessorEnvironment> e); @Override void preSplit(ObserverContext<RegionCoprocessorEnvironment> e); @Override void preSplit(ObserverContext<RegionCoprocessorEnvironment> e,
byte[] splitRow); @Override InternalScanner preCompact(ObserverContext<RegionCoprocessorEnvironment> e,
final Store store, final InternalScanner scanner, final ScanType scanType); @Override void preGetClosestRowBefore(final ObserverContext<RegionCoprocessorEnvironment> c,
final byte [] row, final byte [] family, final Result result); @Override void preGetOp(final ObserverContext<RegionCoprocessorEnvironment> c,
final Get get, final List<Cell> result); @Override boolean preExists(final ObserverContext<RegionCoprocessorEnvironment> c,
final Get get, final boolean exists); @Override void prePut(final ObserverContext<RegionCoprocessorEnvironment> c,
final Put put, final WALEdit edit, final Durability durability); @Override void postPut(final ObserverContext<RegionCoprocessorEnvironment> c,
final Put put, final WALEdit edit, final Durability durability); @Override void preDelete(final ObserverContext<RegionCoprocessorEnvironment> c,
final Delete delete, final WALEdit edit, final Durability durability); @Override void preBatchMutate(ObserverContext<RegionCoprocessorEnvironment> c,
MiniBatchOperationInProgress<Mutation> miniBatchOp); @Override void postDelete(final ObserverContext<RegionCoprocessorEnvironment> c,
final Delete delete, final WALEdit edit, final Durability durability); @Override boolean preCheckAndPut(final ObserverContext<RegionCoprocessorEnvironment> c,
final byte [] row, final byte [] family, final byte [] qualifier,
final CompareFilter.CompareOp compareOp,
final ByteArrayComparable comparator, final Put put,
final boolean result); @Override boolean preCheckAndPutAfterRowLock(final ObserverContext<RegionCoprocessorEnvironment> c,
final byte[] row, final byte[] family, final byte[] qualifier,
final CompareFilter.CompareOp compareOp, final ByteArrayComparable comparator, final Put put,
final boolean result); @Override boolean preCheckAndDelete(final ObserverContext<RegionCoprocessorEnvironment> c,
final byte [] row, final byte [] family, final byte [] qualifier,
final CompareFilter.CompareOp compareOp,
final ByteArrayComparable comparator, final Delete delete,
final boolean result); @Override boolean preCheckAndDeleteAfterRowLock(
final ObserverContext<RegionCoprocessorEnvironment> c, final byte[] row, final byte[] family,
final byte[] qualifier, final CompareFilter.CompareOp compareOp,
final ByteArrayComparable comparator, final Delete delete, final boolean result); @Override long preIncrementColumnValue(final ObserverContext<RegionCoprocessorEnvironment> c,
final byte [] row, final byte [] family, final byte [] qualifier,
final long amount, final boolean writeToWAL); @Override Result preAppend(ObserverContext<RegionCoprocessorEnvironment> c, Append append); @Override Result preAppendAfterRowLock(final ObserverContext<RegionCoprocessorEnvironment> c,
final Append append); @Override Result preIncrement(final ObserverContext<RegionCoprocessorEnvironment> c,
final Increment increment); @Override Result preIncrementAfterRowLock(final ObserverContext<RegionCoprocessorEnvironment> c,
final Increment increment); @Override Cell postMutationBeforeWAL(ObserverContext<RegionCoprocessorEnvironment> ctx,
MutationType opType, Mutation mutation, Cell oldCell, Cell newCell); @Override RegionScanner preScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
final Scan scan, final RegionScanner s); @Override RegionScanner postScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
final Scan scan, final RegionScanner s); @Override boolean preScannerNext(final ObserverContext<RegionCoprocessorEnvironment> c,
final InternalScanner s, final List<Result> result,
final int limit, final boolean hasNext); @Override void preScannerClose(final ObserverContext<RegionCoprocessorEnvironment> c,
final InternalScanner s); @Override void postScannerClose(final ObserverContext<RegionCoprocessorEnvironment> c,
final InternalScanner s); @Override void preBulkLoadHFile(ObserverContext<RegionCoprocessorEnvironment> ctx,
List<Pair<byte[], String>> familyPaths); @Override void prePrepareBulkLoad(ObserverContext<RegionCoprocessorEnvironment> ctx,
PrepareBulkLoadRequest request); @Override void preCleanupBulkLoad(ObserverContext<RegionCoprocessorEnvironment> ctx,
CleanupBulkLoadRequest request); @Override Message preEndpointInvocation(ObserverContext<RegionCoprocessorEnvironment> ctx,
Service service, String methodName, Message request); @Override void postEndpointInvocation(ObserverContext<RegionCoprocessorEnvironment> ctx,
Service service, String methodName, Message request, Message.Builder responseBuilder); @Override void grant(RpcController controller,
AccessControlProtos.GrantRequest request,
RpcCallback<AccessControlProtos.GrantResponse> done); @Override void revoke(RpcController controller,
AccessControlProtos.RevokeRequest request,
RpcCallback<AccessControlProtos.RevokeResponse> done); @Override void getUserPermissions(RpcController controller,
AccessControlProtos.GetUserPermissionsRequest request,
RpcCallback<AccessControlProtos.GetUserPermissionsResponse> done); @Override void checkPermissions(RpcController controller,
AccessControlProtos.CheckPermissionsRequest request,
RpcCallback<AccessControlProtos.CheckPermissionsResponse> done); @Override Service getService(); @Override void preClose(ObserverContext<RegionCoprocessorEnvironment> e, boolean abortRequested); @Override void preStopRegionServer(
ObserverContext<RegionServerCoprocessorEnvironment> env); @Override void preGetTableDescriptors(ObserverContext<MasterCoprocessorEnvironment> ctx,
List<TableName> tableNamesList, List<HTableDescriptor> descriptors,
String regex); @Override void postGetTableDescriptors(ObserverContext<MasterCoprocessorEnvironment> ctx,
List<TableName> tableNamesList, List<HTableDescriptor> descriptors,
String regex); @Override void postGetTableNames(ObserverContext<MasterCoprocessorEnvironment> ctx,
List<HTableDescriptor> descriptors, String regex); @Override void preMerge(ObserverContext<RegionServerCoprocessorEnvironment> ctx, Region regionA,
Region regionB); @Override void postMerge(ObserverContext<RegionServerCoprocessorEnvironment> c, Region regionA,
Region regionB, Region mergedRegion); @Override void preMergeCommit(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
Region regionA, Region regionB, List<Mutation> metaEntries); @Override void postMergeCommit(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
Region regionA, Region regionB, Region mergedRegion); @Override void preRollBackMerge(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
Region regionA, Region regionB); @Override void postRollBackMerge(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
Region regionA, Region regionB); @Override void preRollWALWriterRequest(ObserverContext<RegionServerCoprocessorEnvironment> ctx); @Override void postRollWALWriterRequest(ObserverContext<RegionServerCoprocessorEnvironment> ctx); @Override ReplicationEndpoint postCreateReplicationEndPoint(
ObserverContext<RegionServerCoprocessorEnvironment> ctx, ReplicationEndpoint endpoint); @Override void preReplicateLogEntries(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
List<WALEntry> entries, CellScanner cells); @Override void postReplicateLogEntries(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
List<WALEntry> entries, CellScanner cells); @Override void preSetUserQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final String userName, final Quotas quotas); @Override void preSetUserQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final String userName, final TableName tableName, final Quotas quotas); @Override void preSetUserQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final String userName, final String namespace, final Quotas quotas); @Override void preSetTableQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final TableName tableName, final Quotas quotas); @Override void preSetNamespaceQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final String namespace, final Quotas quotas); } | @Test (timeout=180000) public void testRollWALWriterRequest() throws Exception { AccessTestAction action = new AccessTestAction() { @Override public Object run() throws Exception { ACCESS_CONTROLLER.preRollWALWriterRequest(ObserverContext.createAndPrepare(RSCP_ENV, null)); return null; } }; verifyAllowed(action, SUPERUSER, USER_ADMIN, USER_GROUP_ADMIN); verifyDenied(action, USER_CREATE, USER_OWNER, USER_RW, USER_RO, USER_NONE, USER_GROUP_READ, USER_GROUP_WRITE, USER_GROUP_CREATE); } |
AccessController extends BaseMasterAndRegionObserver implements RegionServerObserver,
AccessControlService.Interface, CoprocessorService, EndpointObserver, BulkLoadObserver { @Override public void preOpen(ObserverContext<RegionCoprocessorEnvironment> e) throws IOException { RegionCoprocessorEnvironment env = e.getEnvironment(); final Region region = env.getRegion(); if (region == null) { LOG.error("NULL region from RegionCoprocessorEnvironment in preOpen()"); } else { HRegionInfo regionInfo = region.getRegionInfo(); if (regionInfo.getTable().isSystemTable()) { checkSystemOrSuperUser(); } else { requirePermission("preOpen", Action.ADMIN); } } } static boolean isAuthorizationSupported(Configuration conf); static boolean isCellAuthorizationSupported(Configuration conf); Region getRegion(); TableAuthManager getAuthManager(); void requireNamespacePermission(String request, String namespace,
Action... permissions); void requireNamespacePermission(String request, String namespace, TableName tableName,
Map<byte[], ? extends Collection<byte[]>> familyMap, Action... permissions); @Override void start(CoprocessorEnvironment env); @Override void stop(CoprocessorEnvironment env); @Override void preCreateTable(ObserverContext<MasterCoprocessorEnvironment> c,
HTableDescriptor desc, HRegionInfo[] regions); @Override void postCreateTableHandler(final ObserverContext<MasterCoprocessorEnvironment> c,
HTableDescriptor desc, HRegionInfo[] regions); @Override void preDeleteTable(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName); @Override void postDeleteTable(ObserverContext<MasterCoprocessorEnvironment> c,
final TableName tableName); @Override void preTruncateTable(ObserverContext<MasterCoprocessorEnvironment> c,
final TableName tableName); @Override void postTruncateTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
final TableName tableName); @Override void preModifyTable(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName,
HTableDescriptor htd); @Override void postModifyTable(ObserverContext<MasterCoprocessorEnvironment> c,
TableName tableName, final HTableDescriptor htd); @Override void preAddColumn(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName,
HColumnDescriptor column); @Override void preModifyColumn(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName,
HColumnDescriptor descriptor); @Override void preDeleteColumn(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName,
byte[] col); @Override void postDeleteColumn(ObserverContext<MasterCoprocessorEnvironment> c,
final TableName tableName, final byte[] col); @Override void preEnableTable(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName); @Override void preDisableTable(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName); @Override void preAbortProcedure(
ObserverContext<MasterCoprocessorEnvironment> ctx,
final ProcedureExecutor<MasterProcedureEnv> procEnv,
final long procId); @Override void postAbortProcedure(ObserverContext<MasterCoprocessorEnvironment> ctx); @Override void preListProcedures(ObserverContext<MasterCoprocessorEnvironment> ctx); @Override void postListProcedures(
ObserverContext<MasterCoprocessorEnvironment> ctx,
List<ProcedureInfo> procInfoList); @Override void preMove(ObserverContext<MasterCoprocessorEnvironment> c, HRegionInfo region,
ServerName srcServer, ServerName destServer); @Override void preAssign(ObserverContext<MasterCoprocessorEnvironment> c, HRegionInfo regionInfo); @Override void preUnassign(ObserverContext<MasterCoprocessorEnvironment> c, HRegionInfo regionInfo,
boolean force); @Override void preRegionOffline(ObserverContext<MasterCoprocessorEnvironment> c,
HRegionInfo regionInfo); @Override void preBalance(ObserverContext<MasterCoprocessorEnvironment> c); @Override boolean preBalanceSwitch(ObserverContext<MasterCoprocessorEnvironment> c,
boolean newValue); @Override void preShutdown(ObserverContext<MasterCoprocessorEnvironment> c); @Override void preStopMaster(ObserverContext<MasterCoprocessorEnvironment> c); @Override void postStartMaster(ObserverContext<MasterCoprocessorEnvironment> ctx); @Override void preSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final SnapshotDescription snapshot, final HTableDescriptor hTableDescriptor); @Override void preListSnapshot(ObserverContext<MasterCoprocessorEnvironment> ctx,
final SnapshotDescription snapshot); @Override void preCloneSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final SnapshotDescription snapshot, final HTableDescriptor hTableDescriptor); @Override void preRestoreSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final SnapshotDescription snapshot, final HTableDescriptor hTableDescriptor); @Override void preDeleteSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final SnapshotDescription snapshot); @Override void preCreateNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx,
NamespaceDescriptor ns); @Override void preDeleteNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx, String namespace); @Override void postDeleteNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx,
final String namespace); @Override void preModifyNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx,
NamespaceDescriptor ns); @Override void preGetNamespaceDescriptor(ObserverContext<MasterCoprocessorEnvironment> ctx, String namespace); @Override void postListNamespaceDescriptors(ObserverContext<MasterCoprocessorEnvironment> ctx,
List<NamespaceDescriptor> descriptors); @Override void preTableFlush(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final TableName tableName); @Override void preOpen(ObserverContext<RegionCoprocessorEnvironment> e); @Override void postOpen(ObserverContext<RegionCoprocessorEnvironment> c); @Override void postLogReplay(ObserverContext<RegionCoprocessorEnvironment> c); @Override void preFlush(ObserverContext<RegionCoprocessorEnvironment> e); @Override void preSplit(ObserverContext<RegionCoprocessorEnvironment> e); @Override void preSplit(ObserverContext<RegionCoprocessorEnvironment> e,
byte[] splitRow); @Override InternalScanner preCompact(ObserverContext<RegionCoprocessorEnvironment> e,
final Store store, final InternalScanner scanner, final ScanType scanType); @Override void preGetClosestRowBefore(final ObserverContext<RegionCoprocessorEnvironment> c,
final byte [] row, final byte [] family, final Result result); @Override void preGetOp(final ObserverContext<RegionCoprocessorEnvironment> c,
final Get get, final List<Cell> result); @Override boolean preExists(final ObserverContext<RegionCoprocessorEnvironment> c,
final Get get, final boolean exists); @Override void prePut(final ObserverContext<RegionCoprocessorEnvironment> c,
final Put put, final WALEdit edit, final Durability durability); @Override void postPut(final ObserverContext<RegionCoprocessorEnvironment> c,
final Put put, final WALEdit edit, final Durability durability); @Override void preDelete(final ObserverContext<RegionCoprocessorEnvironment> c,
final Delete delete, final WALEdit edit, final Durability durability); @Override void preBatchMutate(ObserverContext<RegionCoprocessorEnvironment> c,
MiniBatchOperationInProgress<Mutation> miniBatchOp); @Override void postDelete(final ObserverContext<RegionCoprocessorEnvironment> c,
final Delete delete, final WALEdit edit, final Durability durability); @Override boolean preCheckAndPut(final ObserverContext<RegionCoprocessorEnvironment> c,
final byte [] row, final byte [] family, final byte [] qualifier,
final CompareFilter.CompareOp compareOp,
final ByteArrayComparable comparator, final Put put,
final boolean result); @Override boolean preCheckAndPutAfterRowLock(final ObserverContext<RegionCoprocessorEnvironment> c,
final byte[] row, final byte[] family, final byte[] qualifier,
final CompareFilter.CompareOp compareOp, final ByteArrayComparable comparator, final Put put,
final boolean result); @Override boolean preCheckAndDelete(final ObserverContext<RegionCoprocessorEnvironment> c,
final byte [] row, final byte [] family, final byte [] qualifier,
final CompareFilter.CompareOp compareOp,
final ByteArrayComparable comparator, final Delete delete,
final boolean result); @Override boolean preCheckAndDeleteAfterRowLock(
final ObserverContext<RegionCoprocessorEnvironment> c, final byte[] row, final byte[] family,
final byte[] qualifier, final CompareFilter.CompareOp compareOp,
final ByteArrayComparable comparator, final Delete delete, final boolean result); @Override long preIncrementColumnValue(final ObserverContext<RegionCoprocessorEnvironment> c,
final byte [] row, final byte [] family, final byte [] qualifier,
final long amount, final boolean writeToWAL); @Override Result preAppend(ObserverContext<RegionCoprocessorEnvironment> c, Append append); @Override Result preAppendAfterRowLock(final ObserverContext<RegionCoprocessorEnvironment> c,
final Append append); @Override Result preIncrement(final ObserverContext<RegionCoprocessorEnvironment> c,
final Increment increment); @Override Result preIncrementAfterRowLock(final ObserverContext<RegionCoprocessorEnvironment> c,
final Increment increment); @Override Cell postMutationBeforeWAL(ObserverContext<RegionCoprocessorEnvironment> ctx,
MutationType opType, Mutation mutation, Cell oldCell, Cell newCell); @Override RegionScanner preScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
final Scan scan, final RegionScanner s); @Override RegionScanner postScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
final Scan scan, final RegionScanner s); @Override boolean preScannerNext(final ObserverContext<RegionCoprocessorEnvironment> c,
final InternalScanner s, final List<Result> result,
final int limit, final boolean hasNext); @Override void preScannerClose(final ObserverContext<RegionCoprocessorEnvironment> c,
final InternalScanner s); @Override void postScannerClose(final ObserverContext<RegionCoprocessorEnvironment> c,
final InternalScanner s); @Override void preBulkLoadHFile(ObserverContext<RegionCoprocessorEnvironment> ctx,
List<Pair<byte[], String>> familyPaths); @Override void prePrepareBulkLoad(ObserverContext<RegionCoprocessorEnvironment> ctx,
PrepareBulkLoadRequest request); @Override void preCleanupBulkLoad(ObserverContext<RegionCoprocessorEnvironment> ctx,
CleanupBulkLoadRequest request); @Override Message preEndpointInvocation(ObserverContext<RegionCoprocessorEnvironment> ctx,
Service service, String methodName, Message request); @Override void postEndpointInvocation(ObserverContext<RegionCoprocessorEnvironment> ctx,
Service service, String methodName, Message request, Message.Builder responseBuilder); @Override void grant(RpcController controller,
AccessControlProtos.GrantRequest request,
RpcCallback<AccessControlProtos.GrantResponse> done); @Override void revoke(RpcController controller,
AccessControlProtos.RevokeRequest request,
RpcCallback<AccessControlProtos.RevokeResponse> done); @Override void getUserPermissions(RpcController controller,
AccessControlProtos.GetUserPermissionsRequest request,
RpcCallback<AccessControlProtos.GetUserPermissionsResponse> done); @Override void checkPermissions(RpcController controller,
AccessControlProtos.CheckPermissionsRequest request,
RpcCallback<AccessControlProtos.CheckPermissionsResponse> done); @Override Service getService(); @Override void preClose(ObserverContext<RegionCoprocessorEnvironment> e, boolean abortRequested); @Override void preStopRegionServer(
ObserverContext<RegionServerCoprocessorEnvironment> env); @Override void preGetTableDescriptors(ObserverContext<MasterCoprocessorEnvironment> ctx,
List<TableName> tableNamesList, List<HTableDescriptor> descriptors,
String regex); @Override void postGetTableDescriptors(ObserverContext<MasterCoprocessorEnvironment> ctx,
List<TableName> tableNamesList, List<HTableDescriptor> descriptors,
String regex); @Override void postGetTableNames(ObserverContext<MasterCoprocessorEnvironment> ctx,
List<HTableDescriptor> descriptors, String regex); @Override void preMerge(ObserverContext<RegionServerCoprocessorEnvironment> ctx, Region regionA,
Region regionB); @Override void postMerge(ObserverContext<RegionServerCoprocessorEnvironment> c, Region regionA,
Region regionB, Region mergedRegion); @Override void preMergeCommit(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
Region regionA, Region regionB, List<Mutation> metaEntries); @Override void postMergeCommit(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
Region regionA, Region regionB, Region mergedRegion); @Override void preRollBackMerge(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
Region regionA, Region regionB); @Override void postRollBackMerge(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
Region regionA, Region regionB); @Override void preRollWALWriterRequest(ObserverContext<RegionServerCoprocessorEnvironment> ctx); @Override void postRollWALWriterRequest(ObserverContext<RegionServerCoprocessorEnvironment> ctx); @Override ReplicationEndpoint postCreateReplicationEndPoint(
ObserverContext<RegionServerCoprocessorEnvironment> ctx, ReplicationEndpoint endpoint); @Override void preReplicateLogEntries(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
List<WALEntry> entries, CellScanner cells); @Override void postReplicateLogEntries(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
List<WALEntry> entries, CellScanner cells); @Override void preSetUserQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final String userName, final Quotas quotas); @Override void preSetUserQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final String userName, final TableName tableName, final Quotas quotas); @Override void preSetUserQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final String userName, final String namespace, final Quotas quotas); @Override void preSetTableQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final TableName tableName, final Quotas quotas); @Override void preSetNamespaceQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final String namespace, final Quotas quotas); } | @Test (timeout=180000) public void testOpenRegion() throws Exception { AccessTestAction action = new AccessTestAction() { @Override public Object run() throws Exception { ACCESS_CONTROLLER.preOpen(ObserverContext.createAndPrepare(RCP_ENV, null)); return null; } }; verifyAllowed(action, SUPERUSER, USER_ADMIN, USER_GROUP_ADMIN); verifyDenied(action, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_OWNER, USER_GROUP_CREATE, USER_GROUP_READ, USER_GROUP_WRITE); } |
AccessController extends BaseMasterAndRegionObserver implements RegionServerObserver,
AccessControlService.Interface, CoprocessorService, EndpointObserver, BulkLoadObserver { @Override public void preClose(ObserverContext<RegionCoprocessorEnvironment> e, boolean abortRequested) throws IOException { requirePermission("preClose", Action.ADMIN); } static boolean isAuthorizationSupported(Configuration conf); static boolean isCellAuthorizationSupported(Configuration conf); Region getRegion(); TableAuthManager getAuthManager(); void requireNamespacePermission(String request, String namespace,
Action... permissions); void requireNamespacePermission(String request, String namespace, TableName tableName,
Map<byte[], ? extends Collection<byte[]>> familyMap, Action... permissions); @Override void start(CoprocessorEnvironment env); @Override void stop(CoprocessorEnvironment env); @Override void preCreateTable(ObserverContext<MasterCoprocessorEnvironment> c,
HTableDescriptor desc, HRegionInfo[] regions); @Override void postCreateTableHandler(final ObserverContext<MasterCoprocessorEnvironment> c,
HTableDescriptor desc, HRegionInfo[] regions); @Override void preDeleteTable(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName); @Override void postDeleteTable(ObserverContext<MasterCoprocessorEnvironment> c,
final TableName tableName); @Override void preTruncateTable(ObserverContext<MasterCoprocessorEnvironment> c,
final TableName tableName); @Override void postTruncateTable(ObserverContext<MasterCoprocessorEnvironment> ctx,
final TableName tableName); @Override void preModifyTable(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName,
HTableDescriptor htd); @Override void postModifyTable(ObserverContext<MasterCoprocessorEnvironment> c,
TableName tableName, final HTableDescriptor htd); @Override void preAddColumn(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName,
HColumnDescriptor column); @Override void preModifyColumn(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName,
HColumnDescriptor descriptor); @Override void preDeleteColumn(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName,
byte[] col); @Override void postDeleteColumn(ObserverContext<MasterCoprocessorEnvironment> c,
final TableName tableName, final byte[] col); @Override void preEnableTable(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName); @Override void preDisableTable(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName); @Override void preAbortProcedure(
ObserverContext<MasterCoprocessorEnvironment> ctx,
final ProcedureExecutor<MasterProcedureEnv> procEnv,
final long procId); @Override void postAbortProcedure(ObserverContext<MasterCoprocessorEnvironment> ctx); @Override void preListProcedures(ObserverContext<MasterCoprocessorEnvironment> ctx); @Override void postListProcedures(
ObserverContext<MasterCoprocessorEnvironment> ctx,
List<ProcedureInfo> procInfoList); @Override void preMove(ObserverContext<MasterCoprocessorEnvironment> c, HRegionInfo region,
ServerName srcServer, ServerName destServer); @Override void preAssign(ObserverContext<MasterCoprocessorEnvironment> c, HRegionInfo regionInfo); @Override void preUnassign(ObserverContext<MasterCoprocessorEnvironment> c, HRegionInfo regionInfo,
boolean force); @Override void preRegionOffline(ObserverContext<MasterCoprocessorEnvironment> c,
HRegionInfo regionInfo); @Override void preBalance(ObserverContext<MasterCoprocessorEnvironment> c); @Override boolean preBalanceSwitch(ObserverContext<MasterCoprocessorEnvironment> c,
boolean newValue); @Override void preShutdown(ObserverContext<MasterCoprocessorEnvironment> c); @Override void preStopMaster(ObserverContext<MasterCoprocessorEnvironment> c); @Override void postStartMaster(ObserverContext<MasterCoprocessorEnvironment> ctx); @Override void preSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final SnapshotDescription snapshot, final HTableDescriptor hTableDescriptor); @Override void preListSnapshot(ObserverContext<MasterCoprocessorEnvironment> ctx,
final SnapshotDescription snapshot); @Override void preCloneSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final SnapshotDescription snapshot, final HTableDescriptor hTableDescriptor); @Override void preRestoreSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final SnapshotDescription snapshot, final HTableDescriptor hTableDescriptor); @Override void preDeleteSnapshot(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final SnapshotDescription snapshot); @Override void preCreateNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx,
NamespaceDescriptor ns); @Override void preDeleteNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx, String namespace); @Override void postDeleteNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx,
final String namespace); @Override void preModifyNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx,
NamespaceDescriptor ns); @Override void preGetNamespaceDescriptor(ObserverContext<MasterCoprocessorEnvironment> ctx, String namespace); @Override void postListNamespaceDescriptors(ObserverContext<MasterCoprocessorEnvironment> ctx,
List<NamespaceDescriptor> descriptors); @Override void preTableFlush(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final TableName tableName); @Override void preOpen(ObserverContext<RegionCoprocessorEnvironment> e); @Override void postOpen(ObserverContext<RegionCoprocessorEnvironment> c); @Override void postLogReplay(ObserverContext<RegionCoprocessorEnvironment> c); @Override void preFlush(ObserverContext<RegionCoprocessorEnvironment> e); @Override void preSplit(ObserverContext<RegionCoprocessorEnvironment> e); @Override void preSplit(ObserverContext<RegionCoprocessorEnvironment> e,
byte[] splitRow); @Override InternalScanner preCompact(ObserverContext<RegionCoprocessorEnvironment> e,
final Store store, final InternalScanner scanner, final ScanType scanType); @Override void preGetClosestRowBefore(final ObserverContext<RegionCoprocessorEnvironment> c,
final byte [] row, final byte [] family, final Result result); @Override void preGetOp(final ObserverContext<RegionCoprocessorEnvironment> c,
final Get get, final List<Cell> result); @Override boolean preExists(final ObserverContext<RegionCoprocessorEnvironment> c,
final Get get, final boolean exists); @Override void prePut(final ObserverContext<RegionCoprocessorEnvironment> c,
final Put put, final WALEdit edit, final Durability durability); @Override void postPut(final ObserverContext<RegionCoprocessorEnvironment> c,
final Put put, final WALEdit edit, final Durability durability); @Override void preDelete(final ObserverContext<RegionCoprocessorEnvironment> c,
final Delete delete, final WALEdit edit, final Durability durability); @Override void preBatchMutate(ObserverContext<RegionCoprocessorEnvironment> c,
MiniBatchOperationInProgress<Mutation> miniBatchOp); @Override void postDelete(final ObserverContext<RegionCoprocessorEnvironment> c,
final Delete delete, final WALEdit edit, final Durability durability); @Override boolean preCheckAndPut(final ObserverContext<RegionCoprocessorEnvironment> c,
final byte [] row, final byte [] family, final byte [] qualifier,
final CompareFilter.CompareOp compareOp,
final ByteArrayComparable comparator, final Put put,
final boolean result); @Override boolean preCheckAndPutAfterRowLock(final ObserverContext<RegionCoprocessorEnvironment> c,
final byte[] row, final byte[] family, final byte[] qualifier,
final CompareFilter.CompareOp compareOp, final ByteArrayComparable comparator, final Put put,
final boolean result); @Override boolean preCheckAndDelete(final ObserverContext<RegionCoprocessorEnvironment> c,
final byte [] row, final byte [] family, final byte [] qualifier,
final CompareFilter.CompareOp compareOp,
final ByteArrayComparable comparator, final Delete delete,
final boolean result); @Override boolean preCheckAndDeleteAfterRowLock(
final ObserverContext<RegionCoprocessorEnvironment> c, final byte[] row, final byte[] family,
final byte[] qualifier, final CompareFilter.CompareOp compareOp,
final ByteArrayComparable comparator, final Delete delete, final boolean result); @Override long preIncrementColumnValue(final ObserverContext<RegionCoprocessorEnvironment> c,
final byte [] row, final byte [] family, final byte [] qualifier,
final long amount, final boolean writeToWAL); @Override Result preAppend(ObserverContext<RegionCoprocessorEnvironment> c, Append append); @Override Result preAppendAfterRowLock(final ObserverContext<RegionCoprocessorEnvironment> c,
final Append append); @Override Result preIncrement(final ObserverContext<RegionCoprocessorEnvironment> c,
final Increment increment); @Override Result preIncrementAfterRowLock(final ObserverContext<RegionCoprocessorEnvironment> c,
final Increment increment); @Override Cell postMutationBeforeWAL(ObserverContext<RegionCoprocessorEnvironment> ctx,
MutationType opType, Mutation mutation, Cell oldCell, Cell newCell); @Override RegionScanner preScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
final Scan scan, final RegionScanner s); @Override RegionScanner postScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
final Scan scan, final RegionScanner s); @Override boolean preScannerNext(final ObserverContext<RegionCoprocessorEnvironment> c,
final InternalScanner s, final List<Result> result,
final int limit, final boolean hasNext); @Override void preScannerClose(final ObserverContext<RegionCoprocessorEnvironment> c,
final InternalScanner s); @Override void postScannerClose(final ObserverContext<RegionCoprocessorEnvironment> c,
final InternalScanner s); @Override void preBulkLoadHFile(ObserverContext<RegionCoprocessorEnvironment> ctx,
List<Pair<byte[], String>> familyPaths); @Override void prePrepareBulkLoad(ObserverContext<RegionCoprocessorEnvironment> ctx,
PrepareBulkLoadRequest request); @Override void preCleanupBulkLoad(ObserverContext<RegionCoprocessorEnvironment> ctx,
CleanupBulkLoadRequest request); @Override Message preEndpointInvocation(ObserverContext<RegionCoprocessorEnvironment> ctx,
Service service, String methodName, Message request); @Override void postEndpointInvocation(ObserverContext<RegionCoprocessorEnvironment> ctx,
Service service, String methodName, Message request, Message.Builder responseBuilder); @Override void grant(RpcController controller,
AccessControlProtos.GrantRequest request,
RpcCallback<AccessControlProtos.GrantResponse> done); @Override void revoke(RpcController controller,
AccessControlProtos.RevokeRequest request,
RpcCallback<AccessControlProtos.RevokeResponse> done); @Override void getUserPermissions(RpcController controller,
AccessControlProtos.GetUserPermissionsRequest request,
RpcCallback<AccessControlProtos.GetUserPermissionsResponse> done); @Override void checkPermissions(RpcController controller,
AccessControlProtos.CheckPermissionsRequest request,
RpcCallback<AccessControlProtos.CheckPermissionsResponse> done); @Override Service getService(); @Override void preClose(ObserverContext<RegionCoprocessorEnvironment> e, boolean abortRequested); @Override void preStopRegionServer(
ObserverContext<RegionServerCoprocessorEnvironment> env); @Override void preGetTableDescriptors(ObserverContext<MasterCoprocessorEnvironment> ctx,
List<TableName> tableNamesList, List<HTableDescriptor> descriptors,
String regex); @Override void postGetTableDescriptors(ObserverContext<MasterCoprocessorEnvironment> ctx,
List<TableName> tableNamesList, List<HTableDescriptor> descriptors,
String regex); @Override void postGetTableNames(ObserverContext<MasterCoprocessorEnvironment> ctx,
List<HTableDescriptor> descriptors, String regex); @Override void preMerge(ObserverContext<RegionServerCoprocessorEnvironment> ctx, Region regionA,
Region regionB); @Override void postMerge(ObserverContext<RegionServerCoprocessorEnvironment> c, Region regionA,
Region regionB, Region mergedRegion); @Override void preMergeCommit(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
Region regionA, Region regionB, List<Mutation> metaEntries); @Override void postMergeCommit(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
Region regionA, Region regionB, Region mergedRegion); @Override void preRollBackMerge(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
Region regionA, Region regionB); @Override void postRollBackMerge(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
Region regionA, Region regionB); @Override void preRollWALWriterRequest(ObserverContext<RegionServerCoprocessorEnvironment> ctx); @Override void postRollWALWriterRequest(ObserverContext<RegionServerCoprocessorEnvironment> ctx); @Override ReplicationEndpoint postCreateReplicationEndPoint(
ObserverContext<RegionServerCoprocessorEnvironment> ctx, ReplicationEndpoint endpoint); @Override void preReplicateLogEntries(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
List<WALEntry> entries, CellScanner cells); @Override void postReplicateLogEntries(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
List<WALEntry> entries, CellScanner cells); @Override void preSetUserQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final String userName, final Quotas quotas); @Override void preSetUserQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final String userName, final TableName tableName, final Quotas quotas); @Override void preSetUserQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final String userName, final String namespace, final Quotas quotas); @Override void preSetTableQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final TableName tableName, final Quotas quotas); @Override void preSetNamespaceQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx,
final String namespace, final Quotas quotas); } | @Test (timeout=180000) public void testCloseRegion() throws Exception { AccessTestAction action = new AccessTestAction() { @Override public Object run() throws Exception { ACCESS_CONTROLLER.preClose(ObserverContext.createAndPrepare(RCP_ENV, null), false); return null; } }; verifyAllowed(action, SUPERUSER, USER_ADMIN, USER_GROUP_ADMIN); verifyDenied(action, USER_CREATE, USER_RW, USER_RO, USER_NONE, USER_OWNER, USER_GROUP_CREATE, USER_GROUP_READ, USER_GROUP_WRITE); } |
AuthenticationKey implements Writable { @Override public int hashCode() { int result = id; result = 31 * result + (int) (expirationDate ^ (expirationDate >>> 32)); result = 31 * result + ((secret == null) ? 0 : Arrays.hashCode(secret.getEncoded())); return result; } AuthenticationKey(); AuthenticationKey(int keyId, long expirationDate, SecretKey key); int getKeyId(); long getExpiration(); void setExpiration(long timestamp); @Override int hashCode(); @Override boolean equals(Object obj); @Override String toString(); @Override void write(DataOutput out); @Override void readFields(DataInput in); } | @Test public void test() throws UnsupportedEncodingException { SecretKey secret = Mockito.mock(SecretKey.class); Mockito.when(secret.getEncoded()).thenReturn("secret".getBytes("UTF-8")); AuthenticationKey key = new AuthenticationKey(0, 1234, secret); assertEquals(key.hashCode(), new AuthenticationKey(0, 1234, secret).hashCode()); assertEquals(key, new AuthenticationKey(0, 1234, secret)); AuthenticationKey otherID = new AuthenticationKey(1, 1234, secret); assertNotEquals(key.hashCode(), otherID.hashCode()); assertNotEquals(key, otherID); AuthenticationKey otherExpiry = new AuthenticationKey(0, 8765, secret); assertNotEquals(key.hashCode(), otherExpiry.hashCode()); assertNotEquals(key, otherExpiry); SecretKey other = Mockito.mock(SecretKey.class); Mockito.when(other.getEncoded()).thenReturn("other".getBytes("UTF-8")); AuthenticationKey otherSecret = new AuthenticationKey(0, 1234, other); assertNotEquals(key.hashCode(), otherSecret.hashCode()); assertNotEquals(key, otherSecret); }
EnforcingScanLabelGenerator implements ScanLabelGenerator { public EnforcingScanLabelGenerator() { this.labelsCache = VisibilityLabelsCache.get(); } EnforcingScanLabelGenerator(); @Override void setConf(Configuration conf); @Override Configuration getConf(); @Override List<String> getLabels(User user, Authorizations authorizations); } | @Test public void testEnforcingScanLabelGenerator() throws Exception { final TableName tableName = TableName.valueOf(TEST_NAME.getMethodName()); SUPERUSER.runAs(new PrivilegedExceptionAction<Void>() { public Void run() throws Exception { try (Connection connection = ConnectionFactory.createConnection(conf); Table table = TEST_UTIL.createTable(tableName, CF)) { Put put = new Put(ROW_1); put.add(CF, Q1, HConstants.LATEST_TIMESTAMP, value); put.setCellVisibility(new CellVisibility(SECRET)); table.put(put); put = new Put(ROW_1); put.add(CF, Q2, HConstants.LATEST_TIMESTAMP, value); put.setCellVisibility(new CellVisibility(CONFIDENTIAL)); table.put(put); put = new Put(ROW_1); put.add(CF, Q3, HConstants.LATEST_TIMESTAMP, value); table.put(put); return null; } } }); SUPERUSER.runAs(new PrivilegedExceptionAction<Void>() { public Void run() throws Exception { try (Connection connection = ConnectionFactory.createConnection(conf); Table table = connection.getTable(tableName)) { Get get = new Get(ROW_1); Result result = table.get(get); assertTrue("Missing authorization", result.containsColumn(CF, Q1)); assertTrue("Missing authorization", result.containsColumn(CF, Q2)); assertTrue("Missing authorization", result.containsColumn(CF, Q3)); return null; } } }); TESTUSER.runAs(new PrivilegedExceptionAction<Void>() { public Void run() throws Exception { try (Connection connection = ConnectionFactory.createConnection(conf); Table table = connection.getTable(tableName)) { Get get = new Get(ROW_1); get.setAuthorizations(new Authorizations(new String[] { SECRET, CONFIDENTIAL })); Result result = table.get(get); assertFalse("Inappropriate authorization", result.containsColumn(CF, Q1)); assertTrue("Missing authorization", result.containsColumn(CF, Q2)); assertTrue("Inappropriate filtering", result.containsColumn(CF, Q3)); get = new Get(ROW_1); result = table.get(get); assertFalse("Inappropriate authorization", result.containsColumn(CF, Q1)); assertTrue("Missing authorization", result.containsColumn(CF, Q2)); assertTrue("Inappropriate filtering", result.containsColumn(CF, Q3)); return null; } } }); } |
HttpServer implements FilterContainer { @Override public String toString() { if (listeners.size() == 0) { return "Inactive HttpServer"; } else { StringBuilder sb = new StringBuilder("HttpServer (") .append(isAlive() ? STATE_DESCRIPTION_ALIVE : STATE_DESCRIPTION_NOT_LIVE).append("), listening at:"); for (ListenerInfo li : listeners) { Connector l = li.listener; sb.append(l.getHost()).append(":").append(l.getPort()).append("/,"); } return sb.toString(); } } @Deprecated HttpServer(String name, String bindAddress, int port, boolean findPort
); @Deprecated HttpServer(String name, String bindAddress, int port,
boolean findPort, Configuration conf, Connector connector); @Deprecated HttpServer(String name, String bindAddress, int port,
boolean findPort, Configuration conf, String[] pathSpecs); @Deprecated HttpServer(String name, String bindAddress, int port,
boolean findPort, Configuration conf); @Deprecated HttpServer(String name, String bindAddress, int port,
boolean findPort, Configuration conf, AccessControlList adminsAcl); @Deprecated HttpServer(String name, String bindAddress, int port,
boolean findPort, Configuration conf, AccessControlList adminsAcl,
Connector connector); @Deprecated HttpServer(String name, String bindAddress, int port,
boolean findPort, Configuration conf, AccessControlList adminsAcl,
Connector connector, String[] pathSpecs); private HttpServer(final Builder b); Connector createBaseListener(Configuration conf); @InterfaceAudience.Private static Connector createDefaultChannelConnector(); void addContext(Context ctxt, boolean isFiltered); void setAttribute(String name, Object value); void addJerseyResourcePackage(final String packageName,
final String pathSpec); void addServlet(String name, String pathSpec,
Class<? extends HttpServlet> clazz); void addInternalServlet(String name, String pathSpec,
Class<? extends HttpServlet> clazz); void addInternalServlet(String name, String pathSpec,
Class<? extends HttpServlet> clazz, boolean requireAuth); @Override void addFilter(String name, String classname,
Map<String, String> parameters); @Override void addGlobalFilter(String name, String classname,
Map<String, String> parameters); static void defineFilter(Context ctx, String name,
String classname, Map<String,String> parameters, String[] urls); Object getAttribute(String name); WebAppContext getWebAppContext(); String getWebAppsPath(String appName); @Deprecated int getPort(); InetSocketAddress getConnectorAddress(int index); void setThreads(int min, int max); void start(); void stop(); void join(); boolean isAlive(); @Override String toString(); static boolean isInstrumentationAccessAllowed(
ServletContext servletContext, HttpServletRequest request,
HttpServletResponse response); static boolean hasAdministratorAccess(
ServletContext servletContext, HttpServletRequest request,
HttpServletResponse response); static boolean userHasAdministratorAccess(ServletContext servletContext,
String remoteUser); static final String CONF_CONTEXT_ATTRIBUTE; static final String ADMINS_ACL; static final String BIND_ADDRESS; static final String SPNEGO_FILTER; static final String NO_CACHE_FILTER; static final String APP_DIR; } | @Test public void testLongHeader() throws Exception { URL url = new URL(baseUrl, "/longheader"); HttpURLConnection conn = (HttpURLConnection) url.openConnection(); StringBuilder sb = new StringBuilder(); for (int i = 0 ; i < 63 * 1024; i++) { sb.append("a"); } conn.setRequestProperty("longheader", sb.toString()); assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode()); } |
NamespaceAuditor { public NamespaceTableAndRegionInfo getState(String namespace) { if (stateManager.isInitialized()) { return stateManager.getState(namespace); } return null; } NamespaceAuditor(MasterServices masterServices); void start(); void checkQuotaToCreateTable(TableName tName, int regions); void checkQuotaToUpdateRegion(TableName tName, int regions); int getRegionCountOfTable(TableName tName); void checkQuotaToSplitRegion(HRegionInfo hri); void updateQuotaForRegionMerge(HRegionInfo hri); void addNamespace(NamespaceDescriptor ns); void deleteNamespace(String namespace); void removeFromNamespaceUsage(TableName tableName); void removeRegionFromNamespaceUsage(HRegionInfo hri); NamespaceTableAndRegionInfo getState(String namespace); boolean isInitialized(); } | @Test public void testTableOperations() throws Exception { String nsp = prefix + "_np2"; NamespaceDescriptor nspDesc = NamespaceDescriptor.create(nsp) .addConfiguration(TableNamespaceManager.KEY_MAX_REGIONS, "5") .addConfiguration(TableNamespaceManager.KEY_MAX_TABLES, "2").build(); ADMIN.createNamespace(nspDesc); assertNotNull("Namespace descriptor found null.", ADMIN.getNamespaceDescriptor(nsp)); assertEquals(ADMIN.listNamespaceDescriptors().length, 3); HTableDescriptor tableDescOne = new HTableDescriptor(TableName.valueOf(nsp + TableName.NAMESPACE_DELIM + "table1")); HTableDescriptor tableDescTwo = new HTableDescriptor(TableName.valueOf(nsp + TableName.NAMESPACE_DELIM + "table2")); HTableDescriptor tableDescThree = new HTableDescriptor(TableName.valueOf(nsp + TableName.NAMESPACE_DELIM + "table3")); ADMIN.createTable(tableDescOne); boolean constraintViolated = false; try { ADMIN.createTable(tableDescTwo, Bytes.toBytes("AAA"), Bytes.toBytes("ZZZ"), 5); } catch (Exception exp) { assertTrue(exp instanceof IOException); constraintViolated = true; } finally { assertTrue("Constraint not violated for table " + tableDescTwo.getTableName(), constraintViolated); } ADMIN.createTable(tableDescTwo, Bytes.toBytes("AAA"), Bytes.toBytes("ZZZ"), 4); NamespaceTableAndRegionInfo nspState = getQuotaManager().getState(nsp); assertNotNull(nspState); assertTrue(nspState.getTables().size() == 2); assertTrue(nspState.getRegionCount() == 5); constraintViolated = false; try { ADMIN.createTable(tableDescThree); } catch (Exception exp) { assertTrue(exp instanceof IOException); constraintViolated = true; } finally { assertTrue("Constraint not violated for table " + tableDescThree.getTableName(), constraintViolated); } } |
NamespaceAuditor { public void deleteNamespace(String namespace) throws IOException { stateManager.deleteNamespace(namespace); } NamespaceAuditor(MasterServices masterServices); void start(); void checkQuotaToCreateTable(TableName tName, int regions); void checkQuotaToUpdateRegion(TableName tName, int regions); int getRegionCountOfTable(TableName tName); void checkQuotaToSplitRegion(HRegionInfo hri); void updateQuotaForRegionMerge(HRegionInfo hri); void addNamespace(NamespaceDescriptor ns); void deleteNamespace(String namespace); void removeFromNamespaceUsage(TableName tableName); void removeRegionFromNamespaceUsage(HRegionInfo hri); NamespaceTableAndRegionInfo getState(String namespace); boolean isInitialized(); } | @Test public void testRecreateTableWithSameNameAfterFirstTimeFailure() throws Exception { String nsp1 = prefix + "_testRecreateTable"; NamespaceDescriptor nspDesc = NamespaceDescriptor.create(nsp1) .addConfiguration(TableNamespaceManager.KEY_MAX_REGIONS, "20") .addConfiguration(TableNamespaceManager.KEY_MAX_TABLES, "1").build(); ADMIN.createNamespace(nspDesc); final TableName tableOne = TableName.valueOf(nsp1 + TableName.NAMESPACE_DELIM + "table1"); byte[] columnFamily = Bytes.toBytes("info"); HTableDescriptor tableDescOne = new HTableDescriptor(tableOne); tableDescOne.addFamily(new HColumnDescriptor(columnFamily)); MasterSyncObserver.throwExceptionInPreCreateTableHandler = true; try { try { ADMIN.createTable(tableDescOne); fail("Table " + tableOne.toString() + "creation should fail."); } catch (Exception exp) { LOG.error(exp); } assertFalse(ADMIN.tableExists(tableOne)); NamespaceTableAndRegionInfo nstate = getNamespaceState(nsp1); assertEquals("First table creation failed in namespace so number of tables in namespace " + "should be 0.", 0, nstate.getTables().size()); MasterSyncObserver.throwExceptionInPreCreateTableHandler = false; try { ADMIN.createTable(tableDescOne); } catch (Exception e) { fail("Table " + tableOne.toString() + "creation should succeed."); LOG.error(e); } assertTrue(ADMIN.tableExists(tableOne)); nstate = getNamespaceState(nsp1); assertEquals("First table was created successfully so table size in namespace should " + "be one now.", 1, nstate.getTables().size()); } finally { MasterSyncObserver.throwExceptionInPreCreateTableHandler = false; if (ADMIN.tableExists(tableOne)) { ADMIN.disableTable(tableOne); deleteTable(tableOne); } ADMIN.deleteNamespace(nsp1); } } |
HttpServer implements FilterContainer { public static boolean hasAdministratorAccess( ServletContext servletContext, HttpServletRequest request, HttpServletResponse response) throws IOException { Configuration conf = (Configuration) servletContext.getAttribute(CONF_CONTEXT_ATTRIBUTE); if (!conf.getBoolean( CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) { return true; } String remoteUser = request.getRemoteUser(); if (remoteUser == null) { response.sendError(HttpServletResponse.SC_UNAUTHORIZED, "Unauthenticated users are not " + "authorized to access this page."); return false; } if (servletContext.getAttribute(ADMINS_ACL) != null && !userHasAdministratorAccess(servletContext, remoteUser)) { response.sendError(HttpServletResponse.SC_UNAUTHORIZED, "User " + remoteUser + " is unauthorized to access this page."); return false; } return true; } @Deprecated HttpServer(String name, String bindAddress, int port, boolean findPort
); @Deprecated HttpServer(String name, String bindAddress, int port,
boolean findPort, Configuration conf, Connector connector); @Deprecated HttpServer(String name, String bindAddress, int port,
boolean findPort, Configuration conf, String[] pathSpecs); @Deprecated HttpServer(String name, String bindAddress, int port,
boolean findPort, Configuration conf); @Deprecated HttpServer(String name, String bindAddress, int port,
boolean findPort, Configuration conf, AccessControlList adminsAcl); @Deprecated HttpServer(String name, String bindAddress, int port,
boolean findPort, Configuration conf, AccessControlList adminsAcl,
Connector connector); @Deprecated HttpServer(String name, String bindAddress, int port,
boolean findPort, Configuration conf, AccessControlList adminsAcl,
Connector connector, String[] pathSpecs); private HttpServer(final Builder b); Connector createBaseListener(Configuration conf); @InterfaceAudience.Private static Connector createDefaultChannelConnector(); void addContext(Context ctxt, boolean isFiltered); void setAttribute(String name, Object value); void addJerseyResourcePackage(final String packageName,
final String pathSpec); void addServlet(String name, String pathSpec,
Class<? extends HttpServlet> clazz); void addInternalServlet(String name, String pathSpec,
Class<? extends HttpServlet> clazz); void addInternalServlet(String name, String pathSpec,
Class<? extends HttpServlet> clazz, boolean requireAuth); @Override void addFilter(String name, String classname,
Map<String, String> parameters); @Override void addGlobalFilter(String name, String classname,
Map<String, String> parameters); static void defineFilter(Context ctx, String name,
String classname, Map<String,String> parameters, String[] urls); Object getAttribute(String name); WebAppContext getWebAppContext(); String getWebAppsPath(String appName); @Deprecated int getPort(); InetSocketAddress getConnectorAddress(int index); void setThreads(int min, int max); void start(); void stop(); void join(); boolean isAlive(); @Override String toString(); static boolean isInstrumentationAccessAllowed(
ServletContext servletContext, HttpServletRequest request,
HttpServletResponse response); static boolean hasAdministratorAccess(
ServletContext servletContext, HttpServletRequest request,
HttpServletResponse response); static boolean userHasAdministratorAccess(ServletContext servletContext,
String remoteUser); static final String CONF_CONTEXT_ATTRIBUTE; static final String ADMINS_ACL; static final String BIND_ADDRESS; static final String SPNEGO_FILTER; static final String NO_CACHE_FILTER; static final String APP_DIR; } | @Test public void testHasAdministratorAccess() throws Exception { Configuration conf = new Configuration(); conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false); ServletContext context = Mockito.mock(ServletContext.class); Mockito.when(context.getAttribute(HttpServer.CONF_CONTEXT_ATTRIBUTE)).thenReturn(conf); Mockito.when(context.getAttribute(HttpServer.ADMINS_ACL)).thenReturn(null); HttpServletRequest request = Mockito.mock(HttpServletRequest.class); Mockito.when(request.getRemoteUser()).thenReturn(null); HttpServletResponse response = Mockito.mock(HttpServletResponse.class); Assert.assertTrue(HttpServer.hasAdministratorAccess(context, request, response)); response = Mockito.mock(HttpServletResponse.class); conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, true); Assert.assertFalse(HttpServer.hasAdministratorAccess(context, request, response)); Mockito.verify(response).sendError(Mockito.eq(HttpServletResponse.SC_UNAUTHORIZED), Mockito.anyString()); response = Mockito.mock(HttpServletResponse.class); Mockito.when(request.getRemoteUser()).thenReturn("foo"); Assert.assertTrue(HttpServer.hasAdministratorAccess(context, request, response)); response = Mockito.mock(HttpServletResponse.class); AccessControlList acls = Mockito.mock(AccessControlList.class); Mockito.when(acls.isUserAllowed(Mockito.<UserGroupInformation>any())).thenReturn(false); Mockito.when(context.getAttribute(HttpServer.ADMINS_ACL)).thenReturn(acls); Assert.assertFalse(HttpServer.hasAdministratorAccess(context, request, response)); Mockito.verify(response).sendError(Mockito.eq(HttpServletResponse.SC_UNAUTHORIZED), Mockito.anyString()); response = Mockito.mock(HttpServletResponse.class); Mockito.when(acls.isUserAllowed(Mockito.<UserGroupInformation>any())).thenReturn(true); Mockito.when(context.getAttribute(HttpServer.ADMINS_ACL)).thenReturn(acls); Assert.assertTrue(HttpServer.hasAdministratorAccess(context, request, response)); } |
MultiTableSnapshotInputFormatImpl { public Map<String, Collection<Scan>> getSnapshotsToScans(Configuration conf) throws IOException { Map<String, Collection<Scan>> rtn = Maps.newHashMap(); for (Map.Entry<String, String> entry : ConfigurationUtil .getKeyValues(conf, SNAPSHOT_TO_SCANS_KEY)) { String snapshotName = entry.getKey(); String scan = entry.getValue(); Collection<Scan> snapshotScans = rtn.get(snapshotName); if (snapshotScans == null) { snapshotScans = Lists.newArrayList(); rtn.put(snapshotName, snapshotScans); } snapshotScans.add(TableMapReduceUtil.convertStringToScan(scan)); } return rtn; } void setInput(Configuration conf, Map<String, Collection<Scan>> snapshotScans,
Path restoreDir); List<TableSnapshotInputFormatImpl.InputSplit> getSplits(Configuration conf); Map<String, Collection<Scan>> getSnapshotsToScans(Configuration conf); void setSnapshotToScans(Configuration conf, Map<String, Collection<Scan>> snapshotScans); Map<String, Path> getSnapshotDirs(Configuration conf); void setSnapshotDirs(Configuration conf, Map<String, Path> snapshotDirs); void restoreSnapshots(Configuration conf, Map<String, Path> snapshotToDir, FileSystem fs); static final String RESTORE_DIRS_KEY; static final String SNAPSHOT_TO_SCANS_KEY; } | @Test public void testSetInputSetsSnapshotToScans() throws Exception { callSetInput(); Map<String, Collection<Scan>> actual = subject.getSnapshotsToScans(conf); Map<String, Collection<ScanWithEquals>> actualWithEquals = toScanWithEquals(actual); Map<String, Collection<ScanWithEquals>> expectedWithEquals = toScanWithEquals(snapshotScans); assertEquals(expectedWithEquals, actualWithEquals); } |
MultiTableSnapshotInputFormatImpl { public Map<String, Path> getSnapshotDirs(Configuration conf) throws IOException { List<Map.Entry<String, String>> kvps = ConfigurationUtil.getKeyValues(conf, RESTORE_DIRS_KEY); Map<String, Path> rtn = Maps.newHashMapWithExpectedSize(kvps.size()); for (Map.Entry<String, String> kvp : kvps) { rtn.put(kvp.getKey(), new Path(kvp.getValue())); } return rtn; } void setInput(Configuration conf, Map<String, Collection<Scan>> snapshotScans,
Path restoreDir); List<TableSnapshotInputFormatImpl.InputSplit> getSplits(Configuration conf); Map<String, Collection<Scan>> getSnapshotsToScans(Configuration conf); void setSnapshotToScans(Configuration conf, Map<String, Collection<Scan>> snapshotScans); Map<String, Path> getSnapshotDirs(Configuration conf); void setSnapshotDirs(Configuration conf, Map<String, Path> snapshotDirs); void restoreSnapshots(Configuration conf, Map<String, Path> snapshotToDir, FileSystem fs); static final String RESTORE_DIRS_KEY; static final String SNAPSHOT_TO_SCANS_KEY; } | @Test public void testSetInputPushesRestoreDirectories() throws Exception { callSetInput(); Map<String, Path> restoreDirs = subject.getSnapshotDirs(conf); assertEquals(this.snapshotScans.keySet(), restoreDirs.keySet()); }
@Test public void testSetInputCreatesRestoreDirectoriesUnderRootRestoreDir() throws Exception { callSetInput(); Map<String, Path> restoreDirs = subject.getSnapshotDirs(conf); for (Path snapshotDir : restoreDirs.values()) { assertEquals("Expected " + snapshotDir + " to be a child of " + restoreDir, restoreDir, snapshotDir.getParent()); } } |
TableSplit extends InputSplit implements Writable, Comparable<TableSplit> { @Override public int hashCode() { int result = tableName != null ? tableName.hashCode() : 0; result = 31 * result + (scan != null ? scan.hashCode() : 0); result = 31 * result + (startRow != null ? Arrays.hashCode(startRow) : 0); result = 31 * result + (endRow != null ? Arrays.hashCode(endRow) : 0); result = 31 * result + (regionLocation != null ? regionLocation.hashCode() : 0); return result; } TableSplit(); @Deprecated TableSplit(final byte [] tableName, Scan scan, byte [] startRow, byte [] endRow,
final String location); TableSplit(TableName tableName, Scan scan, byte [] startRow, byte [] endRow,
final String location); TableSplit(TableName tableName, Scan scan, byte [] startRow, byte [] endRow,
final String location, long length); @Deprecated TableSplit(final byte [] tableName, byte[] startRow, byte[] endRow,
final String location); TableSplit(TableName tableName, byte[] startRow, byte[] endRow,
final String location); TableSplit(TableName tableName, byte[] startRow, byte[] endRow,
final String location, long length); Scan getScan(); byte [] getTableName(); TableName getTable(); byte [] getStartRow(); byte [] getEndRow(); String getRegionLocation(); @Override String[] getLocations(); @Override long getLength(); @Override void readFields(DataInput in); @Override void write(DataOutput out); @Override String toString(); @Override int compareTo(TableSplit split); @Override boolean equals(Object o); @Override int hashCode(); @Deprecated
static final Log LOG; } | @Test public void testHashCode() { TableSplit split1 = new TableSplit(TableName.valueOf("table"), "row-start".getBytes(), "row-end".getBytes(), "location"); TableSplit split2 = new TableSplit(TableName.valueOf("table"), "row-start".getBytes(), "row-end".getBytes(), "location"); assertEquals (split1, split2); assertTrue (split1.hashCode() == split2.hashCode()); HashSet<TableSplit> set = new HashSet<TableSplit>(2); set.add(split1); set.add(split2); assertTrue(set.size() == 1); }
@Test public void testHashCode_length() { TableSplit split1 = new TableSplit(TableName.valueOf("table"), "row-start".getBytes(), "row-end".getBytes(), "location", 1984); TableSplit split2 = new TableSplit(TableName.valueOf("table"), "row-start".getBytes(), "row-end".getBytes(), "location", 1982); assertEquals (split1, split2); assertTrue (split1.hashCode() == split2.hashCode()); HashSet<TableSplit> set = new HashSet<TableSplit>(2); set.add(split1); set.add(split2); assertTrue(set.size() == 1); } |
TableSplit extends InputSplit implements Writable, Comparable<TableSplit> { @Override public long getLength() { return length; } TableSplit(); @Deprecated TableSplit(final byte [] tableName, Scan scan, byte [] startRow, byte [] endRow,
final String location); TableSplit(TableName tableName, Scan scan, byte [] startRow, byte [] endRow,
final String location); TableSplit(TableName tableName, Scan scan, byte [] startRow, byte [] endRow,
final String location, long length); @Deprecated TableSplit(final byte [] tableName, byte[] startRow, byte[] endRow,
final String location); TableSplit(TableName tableName, byte[] startRow, byte[] endRow,
final String location); TableSplit(TableName tableName, byte[] startRow, byte[] endRow,
final String location, long length); Scan getScan(); byte [] getTableName(); TableName getTable(); byte [] getStartRow(); byte [] getEndRow(); String getRegionLocation(); @Override String[] getLocations(); @Override long getLength(); @Override void readFields(DataInput in); @Override void write(DataOutput out); @Override String toString(); @Override int compareTo(TableSplit split); @Override boolean equals(Object o); @Override int hashCode(); @Deprecated
static final Log LOG; } | @Test public void testLengthIsSerialized() throws Exception { TableSplit split1 = new TableSplit(TableName.valueOf("table"), "row-start".getBytes(), "row-end".getBytes(), "location", 666); TableSplit deserialized = new TableSplit(TableName.valueOf("table"), "row-start2".getBytes(), "row-end2".getBytes(), "location1"); ReflectionUtils.copy(new Configuration(), split1, deserialized); Assert.assertEquals(666, deserialized.getLength()); } |
TableSplit extends InputSplit implements Writable, Comparable<TableSplit> { @Override public String toString() { StringBuilder sb = new StringBuilder(); sb.append("HBase table split("); sb.append("table name: ").append(tableName); sb.append(", scan: ").append(scan); sb.append(", start row: ").append(Bytes.toStringBinary(startRow)); sb.append(", end row: ").append(Bytes.toStringBinary(endRow)); sb.append(", region location: ").append(regionLocation); sb.append(")"); return sb.toString(); } TableSplit(); @Deprecated TableSplit(final byte [] tableName, Scan scan, byte [] startRow, byte [] endRow,
final String location); TableSplit(TableName tableName, Scan scan, byte [] startRow, byte [] endRow,
final String location); TableSplit(TableName tableName, Scan scan, byte [] startRow, byte [] endRow,
final String location, long length); @Deprecated TableSplit(final byte [] tableName, byte[] startRow, byte[] endRow,
final String location); TableSplit(TableName tableName, byte[] startRow, byte[] endRow,
final String location); TableSplit(TableName tableName, byte[] startRow, byte[] endRow,
final String location, long length); Scan getScan(); byte [] getTableName(); TableName getTable(); byte [] getStartRow(); byte [] getEndRow(); String getRegionLocation(); @Override String[] getLocations(); @Override long getLength(); @Override void readFields(DataInput in); @Override void write(DataOutput out); @Override String toString(); @Override int compareTo(TableSplit split); @Override boolean equals(Object o); @Override int hashCode(); @Deprecated
static final Log LOG; } | @Test public void testToString() { TableSplit split = new TableSplit(TableName.valueOf("table"), "row-start".getBytes(), "row-end".getBytes(), "location"); String str = "HBase table split(table name: table, scan: , start row: row-start, " + "end row: row-end, region location: location)"; Assert.assertEquals(str, split.toString()); split = new TableSplit((TableName) null, null, null, null); str = "HBase table split(table name: null, scan: , start row: null, " + "end row: null, region location: null)"; Assert.assertEquals(str, split.toString()); } |
HashTable extends Configured implements Tool { public HashTable(Configuration conf) { super(conf); } HashTable(Configuration conf); Job createSubmittableJob(String[] args); static void main(String[] args); @Override int run(String[] args); } | @Test public void testHashTable() throws Exception { final String tableName = "testHashTable"; final byte[] family = Bytes.toBytes("family"); final byte[] column1 = Bytes.toBytes("c1"); final byte[] column2 = Bytes.toBytes("c2"); final byte[] column3 = Bytes.toBytes("c3"); int numRows = 100; int numRegions = 10; int numHashFiles = 3; byte[][] splitRows = new byte[numRegions-1][]; for (int i = 1; i < numRegions; i++) { splitRows[i-1] = Bytes.toBytes(numRows * i / numRegions); } long timestamp = 1430764183454L; HTable t1 = TEST_UTIL.createTable(TableName.valueOf(tableName), family, splitRows); for (int i = 0; i < numRows; i++) { Put p = new Put(Bytes.toBytes(i), timestamp); p.addColumn(family, column1, column1); p.addColumn(family, column2, column2); p.addColumn(family, column3, column3); t1.put(p); } t1.close(); HashTable hashTable = new HashTable(TEST_UTIL.getConfiguration()); Path testDir = TEST_UTIL.getDataTestDirOnTestFS(tableName); long batchSize = 300; int code = hashTable.run(new String[] { "--batchsize=" + batchSize, "--numhashfiles=" + numHashFiles, "--scanbatch=2", tableName, testDir.toString()}); assertEquals("test job failed", 0, code); FileSystem fs = TEST_UTIL.getTestFileSystem(); HashTable.TableHash tableHash = HashTable.TableHash.read(fs.getConf(), testDir); assertEquals(tableName, tableHash.tableName); assertEquals(batchSize, tableHash.batchSize); assertEquals(numHashFiles, tableHash.numHashFiles); assertEquals(numHashFiles - 1, tableHash.partitions.size()); for (ImmutableBytesWritable bytes : tableHash.partitions) { LOG.debug("partition: " + Bytes.toInt(bytes.get())); } ImmutableMap<Integer, ImmutableBytesWritable> expectedHashes = ImmutableMap.<Integer, ImmutableBytesWritable>builder() .put(-1, new ImmutableBytesWritable(Bytes.fromHex("714cb10a9e3b5569852980edd8c6ca2f"))) .put(5, new ImmutableBytesWritable(Bytes.fromHex("28d961d9252ce8f8d44a07b38d3e1d96"))) .put(10, new ImmutableBytesWritable(Bytes.fromHex("f6bbc4a224d8fd929b783a92599eaffa"))) .put(15, new ImmutableBytesWritable(Bytes.fromHex("522deb5d97f73a414ecc11457be46881"))) .put(20, new ImmutableBytesWritable(Bytes.fromHex("b026f2611aaa46f7110116d807545352"))) .put(25, new ImmutableBytesWritable(Bytes.fromHex("39ffc1a3094aa12a2e90ffd9cef2ce93"))) .put(30, new ImmutableBytesWritable(Bytes.fromHex("f6b4d75727ce9a30ac29e4f08f601666"))) .put(35, new ImmutableBytesWritable(Bytes.fromHex("422e2d2f1eb79a8f02171a705a42c090"))) .put(40, new ImmutableBytesWritable(Bytes.fromHex("559ad61c900fffefea0a15abf8a97bc3"))) .put(45, new ImmutableBytesWritable(Bytes.fromHex("23019084513eca41cee436b2a29611cb"))) .put(50, new ImmutableBytesWritable(Bytes.fromHex("b40467d222ddb4949b142fe145ee9edc"))) .put(55, new ImmutableBytesWritable(Bytes.fromHex("372bf89fcd8ca4b7ab3c1add9d07f7e4"))) .put(60, new ImmutableBytesWritable(Bytes.fromHex("69ae0585e6255de27dce974e332b8f8b"))) .put(65, new ImmutableBytesWritable(Bytes.fromHex("8029610044297aad0abdbecd485d8e59"))) .put(70, new ImmutableBytesWritable(Bytes.fromHex("de5f784f7f78987b6e57ecfd81c8646f"))) .put(75, new ImmutableBytesWritable(Bytes.fromHex("1cd757cc4e1715c8c3b1c24447a1ec56"))) .put(80, new ImmutableBytesWritable(Bytes.fromHex("f9a53aacfeb6142b08066615e7038095"))) .put(85, new 
ImmutableBytesWritable(Bytes.fromHex("89b872b7e639df32d3276b33928c0c91"))) .put(90, new ImmutableBytesWritable(Bytes.fromHex("45eeac0646d46a474ea0484175faed38"))) .put(95, new ImmutableBytesWritable(Bytes.fromHex("f57c447e32a08f4bf1abb2892839ac56"))) .build(); Map<Integer, ImmutableBytesWritable> actualHashes = new HashMap<Integer, ImmutableBytesWritable>(); Path dataDir = new Path(testDir, HashTable.HASH_DATA_DIR); for (int i = 0; i < numHashFiles; i++) { Path hashPath = new Path(dataDir, HashTable.TableHash.getDataFileName(i)); MapFile.Reader reader = new MapFile.Reader(hashPath, fs.getConf()); ImmutableBytesWritable key = new ImmutableBytesWritable(); ImmutableBytesWritable hash = new ImmutableBytesWritable(); while(reader.next(key, hash)) { String keyString = Bytes.toHex(key.get(), key.getOffset(), key.getLength()); LOG.debug("Key: " + (keyString.isEmpty() ? "-1" : Integer.parseInt(keyString, 16)) + " Hash: " + Bytes.toHex(hash.get(), hash.getOffset(), hash.getLength())); int intKey = -1; if (key.getLength() > 0) { intKey = Bytes.toInt(key.get(), key.getOffset(), key.getLength()); } if (actualHashes.containsKey(intKey)) { Assert.fail("duplicate key in data files: " + intKey); } actualHashes.put(intKey, new ImmutableBytesWritable(hash.copyBytes())); } reader.close(); } FileStatus[] files = fs.listStatus(testDir); for (FileStatus file : files) { LOG.debug("Output file: " + file.getPath()); } files = fs.listStatus(dataDir); for (FileStatus file : files) { LOG.debug("Data file: " + file.getPath()); } if (!expectedHashes.equals(actualHashes)) { LOG.error("Diff: " + Maps.difference(expectedHashes, actualHashes)); } Assert.assertEquals(expectedHashes, actualHashes); TEST_UTIL.deleteTable(tableName); TEST_UTIL.cleanupDataTestDirOnTestFS(); } |
TableInputFormatBase extends InputFormat<ImmutableBytesWritable, Result> { @Deprecated public String reverseDNS(InetAddress ipAddress) throws NamingException, UnknownHostException { String hostName = this.reverseDNSCacheMap.get(ipAddress); if (hostName == null) { String ipAddressString = null; try { ipAddressString = DNS.reverseDns(ipAddress, null); } catch (Exception e) { ipAddressString = InetAddress.getByName(ipAddress.getHostAddress()).getHostName(); } if (ipAddressString == null) throw new UnknownHostException("No host found for " + ipAddress); hostName = Strings.domainNamePointerToHostName(ipAddressString); this.reverseDNSCacheMap.put(ipAddress, hostName); } return hostName; } @Override RecordReader<ImmutableBytesWritable, Result> createRecordReader(
InputSplit split, TaskAttemptContext context); @Override List<InputSplit> getSplits(JobContext context); @Deprecated String reverseDNS(InetAddress ipAddress); List<InputSplit> calculateRebalancedSplits(List<InputSplit> list, JobContext context,
long average); static byte[] getSplitKey(byte[] start, byte[] end, boolean isText); Scan getScan(); void setScan(Scan scan); static final String MAPREDUCE_INPUT_AUTOBALANCE; static final String INPUT_AUTOBALANCE_MAXSKEWRATIO; static final String TABLE_ROW_TEXTKEY; } | @Test public void testTableInputFormatBaseReverseDNSForIPv6() throws UnknownHostException, NamingException { String address = "ipv6.google.com"; String localhost = null; InetAddress addr = null; TableInputFormat inputFormat = new TableInputFormat(); try { localhost = InetAddress.getByName(address).getCanonicalHostName(); addr = Inet6Address.getByName(address); } catch (UnknownHostException e) { return; } System.out.println("Should return the hostname for this host " + localhost + " addr : " + addr); String actualHostName = inputFormat.reverseDNS(addr); assertEquals("Should return the hostname for this host. Expected : " + localhost + " Actual : " + actualHostName, localhost, actualHostName); }
LoadIncrementalHFiles extends Configured implements Tool { protected List<LoadQueueItem> splitStoreFile(final LoadQueueItem item, final Table table, byte[] startKey, byte[] splitKey) throws IOException { final Path hfilePath = item.hfilePath; final String TMP_DIR = "_tmp"; Path tmpDir = item.hfilePath.getParent(); if (!tmpDir.getName().equals(TMP_DIR)) { tmpDir = new Path(tmpDir, TMP_DIR); } LOG.info("HFile at " + hfilePath + " no longer fits inside a single " + "region. Splitting..."); String uniqueName = getUniqueName(); HColumnDescriptor familyDesc = table.getTableDescriptor().getFamily(item.family); Path botOut = new Path(tmpDir, uniqueName + ".bottom"); Path topOut = new Path(tmpDir, uniqueName + ".top"); splitStoreFile(getConf(), hfilePath, familyDesc, splitKey, botOut, topOut); FileSystem fs = tmpDir.getFileSystem(getConf()); fs.setPermission(tmpDir, FsPermission.valueOf("-rwxrwxrwx")); fs.setPermission(botOut, FsPermission.valueOf("-rwxrwxrwx")); fs.setPermission(topOut, FsPermission.valueOf("-rwxrwxrwx")); List<LoadQueueItem> lqis = new ArrayList<LoadQueueItem>(2); lqis.add(new LoadQueueItem(item.family, botOut)); lqis.add(new LoadQueueItem(item.family, topOut)); LOG.info("Successfully split into new HFiles " + botOut + " and " + topOut); return lqis; } private LoadIncrementalHFiles(); LoadIncrementalHFiles(Configuration conf); @SuppressWarnings("deprecation") void doBulkLoad(Path hfofDir, final HTable table); @SuppressWarnings("deprecation") void doBulkLoad(Path hfofDir, final Admin admin, Table table,
RegionLocator regionLocator); static byte[][] inferBoundaries(TreeMap<byte[], Integer> bdryMap); @Override int run(String[] args); static void main(String[] args); static final String NAME; static final String MAX_FILES_PER_REGION_PER_FAMILY; final static String CREATE_TABLE_CONF_KEY; } | @Test(timeout = 120000) public void testSplitStoreFile() throws IOException { Path dir = util.getDataTestDirOnTestFS("testSplitHFile"); FileSystem fs = util.getTestFileSystem(); Path testIn = new Path(dir, "testhfile"); HColumnDescriptor familyDesc = new HColumnDescriptor(FAMILY); HFileTestUtil.createHFile(util.getConfiguration(), fs, testIn, FAMILY, QUALIFIER, Bytes.toBytes("aaa"), Bytes.toBytes("zzz"), 1000); Path bottomOut = new Path(dir, "bottom.out"); Path topOut = new Path(dir, "top.out"); LoadIncrementalHFiles.splitStoreFile( util.getConfiguration(), testIn, familyDesc, Bytes.toBytes("ggg"), bottomOut, topOut); int rowCount = verifyHFile(bottomOut); rowCount += verifyHFile(topOut); assertEquals(1000, rowCount); } |
HttpServer implements FilterContainer { public void stop() throws Exception { MultiException exception = null; for (ListenerInfo li : listeners) { if (!li.isManaged) { continue; } try { li.listener.close(); } catch (Exception e) { LOG.error( "Error while stopping listener for webapp" + webAppContext.getDisplayName(), e); exception = addMultiException(exception, e); } } try { webAppContext.clearAttributes(); webAppContext.stop(); } catch (Exception e) { LOG.error("Error while stopping web app context for webapp " + webAppContext.getDisplayName(), e); exception = addMultiException(exception, e); } try { webServer.stop(); } catch (Exception e) { LOG.error("Error while stopping web server for webapp " + webAppContext.getDisplayName(), e); exception = addMultiException(exception, e); } if (exception != null) { exception.ifExceptionThrow(); } } @Deprecated HttpServer(String name, String bindAddress, int port, boolean findPort
); @Deprecated HttpServer(String name, String bindAddress, int port,
boolean findPort, Configuration conf, Connector connector); @Deprecated HttpServer(String name, String bindAddress, int port,
boolean findPort, Configuration conf, String[] pathSpecs); @Deprecated HttpServer(String name, String bindAddress, int port,
boolean findPort, Configuration conf); @Deprecated HttpServer(String name, String bindAddress, int port,
boolean findPort, Configuration conf, AccessControlList adminsAcl); @Deprecated HttpServer(String name, String bindAddress, int port,
boolean findPort, Configuration conf, AccessControlList adminsAcl,
Connector connector); @Deprecated HttpServer(String name, String bindAddress, int port,
boolean findPort, Configuration conf, AccessControlList adminsAcl,
Connector connector, String[] pathSpecs); private HttpServer(final Builder b); Connector createBaseListener(Configuration conf); @InterfaceAudience.Private static Connector createDefaultChannelConnector(); void addContext(Context ctxt, boolean isFiltered); void setAttribute(String name, Object value); void addJerseyResourcePackage(final String packageName,
final String pathSpec); void addServlet(String name, String pathSpec,
Class<? extends HttpServlet> clazz); void addInternalServlet(String name, String pathSpec,
Class<? extends HttpServlet> clazz); void addInternalServlet(String name, String pathSpec,
Class<? extends HttpServlet> clazz, boolean requireAuth); @Override void addFilter(String name, String classname,
Map<String, String> parameters); @Override void addGlobalFilter(String name, String classname,
Map<String, String> parameters); static void defineFilter(Context ctx, String name,
String classname, Map<String,String> parameters, String[] urls); Object getAttribute(String name); WebAppContext getWebAppContext(); String getWebAppsPath(String appName); @Deprecated int getPort(); InetSocketAddress getConnectorAddress(int index); void setThreads(int min, int max); void start(); void stop(); void join(); boolean isAlive(); @Override String toString(); static boolean isInstrumentationAccessAllowed(
ServletContext servletContext, HttpServletRequest request,
HttpServletResponse response); static boolean hasAdministratorAccess(
ServletContext servletContext, HttpServletRequest request,
HttpServletResponse response); static boolean userHasAdministratorAccess(ServletContext servletContext,
String remoteUser); static final String CONF_CONTEXT_ATTRIBUTE; static final String ADMINS_ACL; static final String BIND_ADDRESS; static final String SPNEGO_FILTER; static final String NO_CACHE_FILTER; static final String APP_DIR; } | @Test public void testHttpServerBuilderWithExternalConnector() throws Exception { Connector c = mock(Connector.class); doReturn("localhost").when(c).getHost(); HttpServer s = new HttpServer.Builder().setName("test").setConnector(c) .build(); s.stop(); } |
LoadIncrementalHFiles extends Configured implements Tool { public static byte[][] inferBoundaries(TreeMap<byte[], Integer> bdryMap) { ArrayList<byte[]> keysArray = new ArrayList<byte[]>(); int runningValue = 0; byte[] currStartKey = null; boolean firstBoundary = true; for (Map.Entry<byte[], Integer> item: bdryMap.entrySet()) { if (runningValue == 0) currStartKey = item.getKey(); runningValue += item.getValue(); if (runningValue == 0) { if (!firstBoundary) keysArray.add(currStartKey); firstBoundary = false; } } return keysArray.toArray(new byte[0][0]); } private LoadIncrementalHFiles(); LoadIncrementalHFiles(Configuration conf); @SuppressWarnings("deprecation") void doBulkLoad(Path hfofDir, final HTable table); @SuppressWarnings("deprecation") void doBulkLoad(Path hfofDir, final Admin admin, Table table,
RegionLocator regionLocator); static byte[][] inferBoundaries(TreeMap<byte[], Integer> bdryMap); @Override int run(String[] args); static void main(String[] args); static final String NAME; static final String MAX_FILES_PER_REGION_PER_FAMILY; final static String CREATE_TABLE_CONF_KEY; } | @Test(timeout = 120000) public void testInferBoundaries() { TreeMap<byte[], Integer> map = new TreeMap<byte[], Integer>(Bytes.BYTES_COMPARATOR); String first; String last; first = "a"; last = "e"; addStartEndKeysForTest(map, first.getBytes(), last.getBytes()); first = "r"; last = "s"; addStartEndKeysForTest(map, first.getBytes(), last.getBytes()); first = "o"; last = "p"; addStartEndKeysForTest(map, first.getBytes(), last.getBytes()); first = "g"; last = "k"; addStartEndKeysForTest(map, first.getBytes(), last.getBytes()); first = "v"; last = "x"; addStartEndKeysForTest(map, first.getBytes(), last.getBytes()); first = "c"; last = "i"; addStartEndKeysForTest(map, first.getBytes(), last.getBytes()); first = "m"; last = "q"; addStartEndKeysForTest(map, first.getBytes(), last.getBytes()); first = "s"; last = "t"; addStartEndKeysForTest(map, first.getBytes(), last.getBytes()); first = "u"; last = "w"; addStartEndKeysForTest(map, first.getBytes(), last.getBytes()); byte[][] keysArray = LoadIncrementalHFiles.inferBoundaries(map); byte[][] compare = new byte[3][]; compare[0] = "m".getBytes(); compare[1] = "r".getBytes(); compare[2] = "u".getBytes(); assertEquals(keysArray.length, 3); for (int row = 0; row<keysArray.length; row++){ assertArrayEquals(keysArray[row], compare[row]); } } |
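The inferBoundaries() entry above encodes a small sweep-line idea: each HFile contributes +1 at its first row key and -1 at its last row key, and the running sum returns to zero exactly where a group of overlapping key ranges ends; the start key of every group after the first becomes a region boundary. Below is a simplified, self-contained sketch of that counting idea only (String keys instead of byte[] with Bytes.BYTES_COMPARATOR; class, method, and key names are illustrative, not from the entry):

import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;

public class InferBoundariesSketch {
  // Walk the sorted key space; whenever the running sum returns to zero, a group of
  // overlapping ranges has closed. The start of every group after the first is a boundary.
  static List<String> inferBoundaries(TreeMap<String, Integer> deltas) {
    List<String> boundaries = new ArrayList<>();
    int running = 0;
    String currStart = null;
    boolean first = true;
    for (Map.Entry<String, Integer> e : deltas.entrySet()) {
      if (running == 0) currStart = e.getKey();
      running += e.getValue();
      if (running == 0) {
        if (!first) boundaries.add(currStart);
        first = false;
      }
    }
    return boundaries;
  }

  // +1 at the range's first key, -1 at its last key.
  static void addRange(TreeMap<String, Integer> deltas, String first, String last) {
    deltas.merge(first, 1, Integer::sum);
    deltas.merge(last, -1, Integer::sum);
  }

  public static void main(String[] args) {
    TreeMap<String, Integer> deltas = new TreeMap<>();
    addRange(deltas, "a", "e");
    addRange(deltas, "c", "i");
    addRange(deltas, "g", "k");   // a..k overlap into one group
    addRange(deltas, "m", "q");   // a second group starts at "m"
    addRange(deltas, "o", "p");
    System.out.println(inferBoundaries(deltas)); // prints [m]
  }
}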
LoadIncrementalHFiles extends Configured implements Tool { @Override public int run(String[] args) throws Exception { if (args.length != 2) { usage(); return -1; } initialize(); String dirPath = args[0]; TableName tableName = TableName.valueOf(args[1]); boolean tableExists = this.doesTableExist(tableName); if (!tableExists) { if ("yes".equalsIgnoreCase(getConf().get(CREATE_TABLE_CONF_KEY, "yes"))) { this.createTable(tableName, dirPath); } else { String errorMsg = format("Table '%s' does not exist.", tableName); LOG.error(errorMsg); throw new TableNotFoundException(errorMsg); } } Path hfofDir = new Path(dirPath); try (Connection connection = ConnectionFactory.createConnection(getConf()); HTable table = (HTable) connection.getTable(tableName);) { doBulkLoad(hfofDir, table); } return 0; } private LoadIncrementalHFiles(); LoadIncrementalHFiles(Configuration conf); @SuppressWarnings("deprecation") void doBulkLoad(Path hfofDir, final HTable table); @SuppressWarnings("deprecation") void doBulkLoad(Path hfofDir, final Admin admin, Table table,
RegionLocator regionLocator); static byte[][] inferBoundaries(TreeMap<byte[], Integer> bdryMap); @Override int run(String[] args); static void main(String[] args); static final String NAME; static final String MAX_FILES_PER_REGION_PER_FAMILY; final static String CREATE_TABLE_CONF_KEY; } | @Test(timeout = 60000) public void testLoadTooMayHFiles() throws Exception { Path dir = util.getDataTestDirOnTestFS("testLoadTooMayHFiles"); FileSystem fs = util.getTestFileSystem(); dir = dir.makeQualified(fs); Path familyDir = new Path(dir, Bytes.toString(FAMILY)); byte[] from = Bytes.toBytes("begin"); byte[] to = Bytes.toBytes("end"); for (int i = 0; i <= MAX_FILES_PER_REGION_PER_FAMILY; i++) { HFileTestUtil.createHFile(util.getConfiguration(), fs, new Path(familyDir, "hfile_" + i), FAMILY, QUALIFIER, from, to, 1000); } LoadIncrementalHFiles loader = new LoadIncrementalHFiles(util.getConfiguration()); String [] args= {dir.toString(), "mytable_testLoadTooMayHFiles"}; try { loader.run(args); fail("Bulk loading too many files should fail"); } catch (IOException ie) { assertTrue(ie.getMessage().contains("Trying to load more than " + MAX_FILES_PER_REGION_PER_FAMILY + " hfiles")); } }
@Test(expected = TableNotFoundException.class) public void testWithoutAnExistingTableAndCreateTableSetToNo() throws Exception { Configuration conf = util.getConfiguration(); conf.set(LoadIncrementalHFiles.CREATE_TABLE_CONF_KEY, "no"); LoadIncrementalHFiles loader = new LoadIncrementalHFiles(conf); String[] args = { "directory", "nonExistingTable" }; loader.run(args); } |
CopyTable extends Configured implements Tool { public CopyTable(Configuration conf) { super(conf); } CopyTable(Configuration conf); Job createSubmittableJob(String[] args); static void main(String[] args); @Override int run(String[] args); } | @Test public void testCopyTable() throws Exception { doCopyTableTest(false); } |
CopyTable extends Configured implements Tool { @Override public int run(String[] args) throws Exception { String[] otherArgs = new GenericOptionsParser(getConf(), args).getRemainingArgs(); Job job = createSubmittableJob(otherArgs); if (job == null) return 1; if (!job.waitForCompletion(true)) { LOG.info("Map-reduce job failed!"); if (bulkload) { LOG.info("Files are not bulkloaded!"); } return 1; } int code = 0; if (bulkload) { code = new LoadIncrementalHFiles(this.getConf()).run(new String[]{this.bulkloadDir.toString(), this.dstTableName}); if (code == 0) { FileSystem fs = FileSystem.get(this.getConf()); if (!fs.delete(this.bulkloadDir, true)) { LOG.error("Deleting folder " + bulkloadDir + " failed!"); code = 1; } } } return code; } CopyTable(Configuration conf); Job createSubmittableJob(String[] args); static void main(String[] args); @Override int run(String[] args); } | @Test public void testStartStopRow() throws Exception { final TableName TABLENAME1 = TableName.valueOf("testStartStopRow1"); final TableName TABLENAME2 = TableName.valueOf("testStartStopRow2"); final byte[] FAMILY = Bytes.toBytes("family"); final byte[] COLUMN1 = Bytes.toBytes("c1"); final byte[] ROW0 = Bytes.toBytes("row0"); final byte[] ROW1 = Bytes.toBytes("row1"); final byte[] ROW2 = Bytes.toBytes("row2"); Table t1 = TEST_UTIL.createTable(TABLENAME1, FAMILY); Table t2 = TEST_UTIL.createTable(TABLENAME2, FAMILY); Put p = new Put(ROW0); p.add(FAMILY, COLUMN1, COLUMN1); t1.put(p); p = new Put(ROW1); p.add(FAMILY, COLUMN1, COLUMN1); t1.put(p); p = new Put(ROW2); p.add(FAMILY, COLUMN1, COLUMN1); t1.put(p); CopyTable copy = new CopyTable(TEST_UTIL.getConfiguration()); assertEquals( 0, copy.run(new String[] { "--new.name=" + TABLENAME2, "--startrow=row1", "--stoprow=row2", TABLENAME1.getNameAsString() })); Get g = new Get(ROW1); Result r = t2.get(g); assertEquals(1, r.size()); assertTrue(CellUtil.matchingQualifier(r.rawCells()[0], COLUMN1)); g = new Get(ROW0); r = t2.get(g); assertEquals(0, r.size()); g = new Get(ROW2); r = t2.get(g); assertEquals(0, r.size()); t1.close(); t2.close(); TEST_UTIL.deleteTable(TABLENAME1); TEST_UTIL.deleteTable(TABLENAME2); } |
CopyTable extends Configured implements Tool { public static void main(String[] args) throws Exception { int ret = ToolRunner.run(new CopyTable(HBaseConfiguration.create()), args); System.exit(ret); } CopyTable(Configuration conf); Job createSubmittableJob(String[] args); static void main(String[] args); @Override int run(String[] args); } | @Test public void testMainMethod() throws Exception { String[] emptyArgs = { "-h" }; PrintStream oldWriter = System.err; ByteArrayOutputStream data = new ByteArrayOutputStream(); PrintStream writer = new PrintStream(data); System.setErr(writer); SecurityManager SECURITY_MANAGER = System.getSecurityManager(); LauncherSecurityManager newSecurityManager= new LauncherSecurityManager(); System.setSecurityManager(newSecurityManager); try { CopyTable.main(emptyArgs); fail("should be exit"); } catch (SecurityException e) { assertEquals(1, newSecurityManager.getExitCode()); } finally { System.setErr(oldWriter); System.setSecurityManager(SECURITY_MANAGER); } assertTrue(data.toString().contains("rs.class")); assertTrue(data.toString().contains("Usage:")); } |
ImportTsv extends Configured implements Tool { private static void createTable(Admin admin, TableName tableName, String[] columns) throws IOException { HTableDescriptor htd = new HTableDescriptor(tableName); Set<String> cfSet = getColumnFamilies(columns); for (String cf : cfSet) { HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toBytes(cf)); htd.addFamily(hcd); } LOG.warn(format("Creating table '%s' with '%s' columns and default descriptors.", tableName, cfSet)); admin.createTable(htd); } static Job createSubmittableJob(Configuration conf, String[] args); @Override int run(String[] args); static void main(String[] args); final static String MAPPER_CONF_KEY; final static String BULK_OUTPUT_CONF_KEY; final static String TIMESTAMP_CONF_KEY; final static String JOB_NAME_CONF_KEY; final static String SKIP_LINES_CONF_KEY; final static String COLUMNS_CONF_KEY; final static String SEPARATOR_CONF_KEY; final static String ATTRIBUTE_SEPERATOR_CONF_KEY; final static String CREDENTIALS_LOCATION; final static String CREATE_TABLE_CONF_KEY; final static String NO_STRICT_COL_FAMILY; } | @Test public void testMROnTable() throws Exception { String table = "test-" + UUID.randomUUID(); String[] args = new String[] { "-D" + ImportTsv.COLUMNS_CONF_KEY + "=HBASE_ROW_KEY,FAM:A,FAM:B", "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=\u001b", table }; util.createTable(TableName.valueOf(table), FAMILY); doMROnTableTest(util, FAMILY, null, args, 1); util.deleteTable(table); }
@Test public void testMROnTableWithTimestamp() throws Exception { String table = "test-" + UUID.randomUUID(); String[] args = new String[] { "-D" + ImportTsv.COLUMNS_CONF_KEY + "=HBASE_ROW_KEY,HBASE_TS_KEY,FAM:A,FAM:B", "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=,", table }; String data = "KEY,1234,VALUE1,VALUE2\n"; util.createTable(TableName.valueOf(table), FAMILY); doMROnTableTest(util, FAMILY, data, args, 1); util.deleteTable(table); }
@Test public void testMROnTableWithCustomMapper() throws Exception { String table = "test-" + UUID.randomUUID(); String[] args = new String[] { "-D" + ImportTsv.MAPPER_CONF_KEY + "=org.apache.hadoop.hbase.mapreduce.TsvImporterCustomTestMapper", table }; util.createTable(TableName.valueOf(table), FAMILY); doMROnTableTest(util, FAMILY, null, args, 3); util.deleteTable(table); }
@Test public void testBulkOutputWithAnExistingTable() throws Exception { String table = "test-" + UUID.randomUUID(); Path hfiles = new Path(util.getDataTestDirOnTestFS(table), "hfiles"); String[] args = new String[] { "-D" + ImportTsv.COLUMNS_CONF_KEY + "=HBASE_ROW_KEY,FAM:A,FAM:B", "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=\u001b", "-D" + ImportTsv.BULK_OUTPUT_CONF_KEY + "=" + hfiles.toString(), table }; util.createTable(TableName.valueOf(table), FAMILY); doMROnTableTest(util, FAMILY, null, args, 3); util.deleteTable(table); }
@Test public void testBulkOutputWithAnExistingTableNoStrictTrue() throws Exception { String table = "test-" + UUID.randomUUID(); Path hfiles = new Path(util.getDataTestDirOnTestFS(table), "hfiles"); String[] args = new String[] { "-D" + ImportTsv.COLUMNS_CONF_KEY + "=HBASE_ROW_KEY,FAM:A,FAM:B", "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=\u001b", "-D" + ImportTsv.BULK_OUTPUT_CONF_KEY + "=" + hfiles.toString(), "-D" + ImportTsv.NO_STRICT_COL_FAMILY + "=true", table }; util.createTable(TableName.valueOf(table), FAMILY); doMROnTableTest(util, FAMILY, null, args, 3); util.deleteTable(table); } |
ImportTsv extends Configured implements Tool { public static Job createSubmittableJob(Configuration conf, String[] args) throws IOException, ClassNotFoundException { Job job = null; try (Connection connection = ConnectionFactory.createConnection(conf)) { try (Admin admin = connection.getAdmin()) { String actualSeparator = conf.get(SEPARATOR_CONF_KEY); if (actualSeparator != null) { conf.set(SEPARATOR_CONF_KEY, Base64.encodeBytes(actualSeparator.getBytes())); } String mapperClassName = conf.get(MAPPER_CONF_KEY); Class mapperClass = mapperClassName != null ? Class.forName(mapperClassName) : DEFAULT_MAPPER; TableName tableName = TableName.valueOf(args[0]); Path inputDir = new Path(args[1]); String jobName = conf.get(JOB_NAME_CONF_KEY,NAME + "_" + tableName.getNameAsString()); job = Job.getInstance(conf, jobName); job.setJarByClass(mapperClass); FileInputFormat.setInputPaths(job, inputDir); job.setInputFormatClass(TextInputFormat.class); job.setMapperClass(mapperClass); String hfileOutPath = conf.get(BULK_OUTPUT_CONF_KEY); String columns[] = conf.getStrings(COLUMNS_CONF_KEY); if(StringUtils.isNotEmpty(conf.get(CREDENTIALS_LOCATION))) { String fileLoc = conf.get(CREDENTIALS_LOCATION); Credentials cred = Credentials.readTokenStorageFile(new File(fileLoc), conf); job.getCredentials().addAll(cred); } if (hfileOutPath != null) { if (!admin.tableExists(tableName)) { String errorMsg = format("Table '%s' does not exist.", tableName); if ("yes".equalsIgnoreCase(conf.get(CREATE_TABLE_CONF_KEY, "yes"))) { LOG.warn(errorMsg); createTable(admin, tableName, columns); } else { LOG.error(errorMsg); throw new TableNotFoundException(errorMsg); } } try (Table table = connection.getTable(tableName); RegionLocator regionLocator = connection.getRegionLocator(tableName)) { boolean noStrict = conf.getBoolean(NO_STRICT_COL_FAMILY, false); if(!noStrict) { ArrayList<String> unmatchedFamilies = new ArrayList<String>(); Set<String> cfSet = getColumnFamilies(columns); HTableDescriptor tDesc = table.getTableDescriptor(); for (String cf : cfSet) { if(tDesc.getFamily(Bytes.toBytes(cf)) == null) { unmatchedFamilies.add(cf); } } if(unmatchedFamilies.size() > 0) { ArrayList<String> familyNames = new ArrayList<String>(); for (HColumnDescriptor family : table.getTableDescriptor().getFamilies()) { familyNames.add(family.getNameAsString()); } String msg = "Column Families " + unmatchedFamilies + " specified in " + COLUMNS_CONF_KEY + " does not match with any of the table " + tableName + " column families " + familyNames + ".\n" + "To disable column family check, use -D" + NO_STRICT_COL_FAMILY + "=true.\n"; usage(msg); System.exit(-1); } } job.setReducerClass(PutSortReducer.class); Path outputDir = new Path(hfileOutPath); FileOutputFormat.setOutputPath(job, outputDir); job.setMapOutputKeyClass(ImmutableBytesWritable.class); if (mapperClass.equals(TsvImporterTextMapper.class)) { job.setMapOutputValueClass(Text.class); job.setReducerClass(TextSortReducer.class); } else { job.setMapOutputValueClass(Put.class); job.setCombinerClass(PutCombiner.class); } HFileOutputFormat2.configureIncrementalLoad(job, table.getTableDescriptor(), regionLocator); } } else { if (!admin.tableExists(tableName)) { String errorMsg = format("Table '%s' does not exist.", tableName); LOG.error(errorMsg); throw new TableNotFoundException(errorMsg); } if (mapperClass.equals(TsvImporterTextMapper.class)) { usage(TsvImporterTextMapper.class.toString() + " should not be used for non bulkloading case. 
use " + TsvImporterMapper.class.toString() + " or custom mapper whose value type is Put."); System.exit(-1); } TableMapReduceUtil.initTableReducerJob(tableName.getNameAsString(), null, job); job.setNumReduceTasks(0); } TableMapReduceUtil.addDependencyJars(job); TableMapReduceUtil.addDependencyJars(job.getConfiguration(), com.google.common.base.Function.class ); } } return job; } static Job createSubmittableJob(Configuration conf, String[] args); @Override int run(String[] args); static void main(String[] args); final static String MAPPER_CONF_KEY; final static String BULK_OUTPUT_CONF_KEY; final static String TIMESTAMP_CONF_KEY; final static String JOB_NAME_CONF_KEY; final static String SKIP_LINES_CONF_KEY; final static String COLUMNS_CONF_KEY; final static String SEPARATOR_CONF_KEY; final static String ATTRIBUTE_SEPERATOR_CONF_KEY; final static String CREDENTIALS_LOCATION; final static String CREATE_TABLE_CONF_KEY; final static String NO_STRICT_COL_FAMILY; } | @Test public void testJobConfigurationsWithTsvImporterTextMapper() throws Exception { String table = "test-" + UUID.randomUUID(); Path bulkOutputPath = new Path(util.getDataTestDirOnTestFS(table),"hfiles"); String INPUT_FILE = "InputFile1.csv"; String[] args = new String[] { "-D" + ImportTsv.MAPPER_CONF_KEY + "=org.apache.hadoop.hbase.mapreduce.TsvImporterTextMapper", "-D" + ImportTsv.COLUMNS_CONF_KEY + "=HBASE_ROW_KEY,FAM:A,FAM:B", "-D" + ImportTsv.SEPARATOR_CONF_KEY + "=,", "-D" + ImportTsv.BULK_OUTPUT_CONF_KEY + "=" + bulkOutputPath.toString(), table, INPUT_FILE }; GenericOptionsParser opts = new GenericOptionsParser(util.getConfiguration(), args); args = opts.getRemainingArgs(); Job job = ImportTsv.createSubmittableJob(util.getConfiguration(), args); assertTrue(job.getMapperClass().equals(TsvImporterTextMapper.class)); assertTrue(job.getReducerClass().equals(TextSortReducer.class)); assertTrue(job.getMapOutputValueClass().equals(Text.class)); }
@Test(expected = TableNotFoundException.class) public void testWithoutAnExistingTableAndCreateTableSetToNo() throws Exception { String table = "test-" + UUID.randomUUID(); String[] args = new String[] { table, "/inputFile" }; Configuration conf = new Configuration(util.getConfiguration()); conf.set(ImportTsv.COLUMNS_CONF_KEY, "HBASE_ROW_KEY,FAM:A"); conf.set(ImportTsv.BULK_OUTPUT_CONF_KEY, "/output"); conf.set(ImportTsv.CREATE_TABLE_CONF_KEY, "no"); ImportTsv.createSubmittableJob(conf, args); }
@Test(expected = TableNotFoundException.class) public void testMRWithoutAnExistingTable() throws Exception { String table = "test-" + UUID.randomUUID(); String[] args = new String[] { table, "/inputFile" }; Configuration conf = new Configuration(util.getConfiguration()); ImportTsv.createSubmittableJob(conf, args); } |
TableMapReduceUtil { public static void initTableMapperJob(String table, Scan scan, Class<? extends TableMapper> mapper, Class<?> outputKeyClass, Class<?> outputValueClass, Job job) throws IOException { initTableMapperJob(table, scan, mapper, outputKeyClass, outputValueClass, job, true); } static void initTableMapperJob(String table, Scan scan,
Class<? extends TableMapper> mapper,
Class<?> outputKeyClass,
Class<?> outputValueClass, Job job); static void initTableMapperJob(TableName table,
Scan scan,
Class<? extends TableMapper> mapper,
Class<?> outputKeyClass,
Class<?> outputValueClass,
Job job); static void initTableMapperJob(byte[] table, Scan scan,
Class<? extends TableMapper> mapper,
Class<?> outputKeyClass,
Class<?> outputValueClass, Job job); static void initTableMapperJob(String table, Scan scan,
Class<? extends TableMapper> mapper,
Class<?> outputKeyClass,
Class<?> outputValueClass, Job job,
boolean addDependencyJars, Class<? extends InputFormat> inputFormatClass); static void initTableMapperJob(String table, Scan scan,
Class<? extends TableMapper> mapper,
Class<?> outputKeyClass,
Class<?> outputValueClass, Job job,
boolean addDependencyJars, boolean initCredentials,
Class<? extends InputFormat> inputFormatClass); static void initTableMapperJob(byte[] table, Scan scan,
Class<? extends TableMapper> mapper,
Class<?> outputKeyClass,
Class<?> outputValueClass, Job job,
boolean addDependencyJars, Class<? extends InputFormat> inputFormatClass); static void initTableMapperJob(byte[] table, Scan scan,
Class<? extends TableMapper> mapper,
Class<?> outputKeyClass,
Class<?> outputValueClass, Job job,
boolean addDependencyJars); static void initTableMapperJob(String table, Scan scan,
Class<? extends TableMapper> mapper,
Class<?> outputKeyClass,
Class<?> outputValueClass, Job job,
boolean addDependencyJars); static void resetCacheConfig(Configuration conf); static void initMultiTableSnapshotMapperJob(Map<String, Collection<Scan>> snapshotScans,
Class<? extends TableMapper> mapper, Class<?> outputKeyClass, Class<?> outputValueClass,
Job job, boolean addDependencyJars, Path tmpRestoreDir); static void initTableSnapshotMapperJob(String snapshotName, Scan scan,
Class<? extends TableMapper> mapper,
Class<?> outputKeyClass,
Class<?> outputValueClass, Job job,
boolean addDependencyJars, Path tmpRestoreDir); static void initTableMapperJob(List<Scan> scans,
Class<? extends TableMapper> mapper,
Class<?> outputKeyClass,
Class<?> outputValueClass, Job job); static void initTableMapperJob(List<Scan> scans,
Class<? extends TableMapper> mapper,
Class<?> outputKeyClass,
Class<?> outputValueClass, Job job,
boolean addDependencyJars); static void initTableMapperJob(List<Scan> scans,
Class<? extends TableMapper> mapper,
Class<?> outputKeyClass,
Class<?> outputValueClass, Job job,
boolean addDependencyJars,
boolean initCredentials); static void initCredentials(Job job); @Deprecated static void initCredentialsForCluster(Job job, String quorumAddress); static void initCredentialsForCluster(Job job, Configuration conf); static void initTableReducerJob(String table,
Class<? extends TableReducer> reducer, Job job); static void initTableReducerJob(String table,
Class<? extends TableReducer> reducer, Job job,
Class partitioner); static void initTableReducerJob(String table,
Class<? extends TableReducer> reducer, Job job,
Class partitioner, String quorumAddress, String serverClass,
String serverImpl); static void initTableReducerJob(String table,
Class<? extends TableReducer> reducer, Job job,
Class partitioner, String quorumAddress, String serverClass,
String serverImpl, boolean addDependencyJars); static void limitNumReduceTasks(String table, Job job); static void setNumReduceTasks(String table, Job job); static void setScannerCaching(Job job, int batchSize); static void addHBaseDependencyJars(Configuration conf); static String buildDependencyClasspath(Configuration conf); static void addDependencyJars(Job job); static void addDependencyJars(Configuration conf,
Class<?>... classes); } | @Test public void testInitTableMapperJob1() throws Exception { Configuration configuration = new Configuration(); Job job = new Job(configuration, "tableName"); TableMapReduceUtil.initTableMapperJob("Table", new Scan(), Import.Importer.class, Text.class, Text.class, job, false, WALInputFormat.class); assertEquals(WALInputFormat.class, job.getInputFormatClass()); assertEquals(Import.Importer.class, job.getMapperClass()); assertEquals(LongWritable.class, job.getOutputKeyClass()); assertEquals(Text.class, job.getOutputValueClass()); assertNull(job.getCombinerClass()); assertEquals("Table", job.getConfiguration().get(TableInputFormat.INPUT_TABLE)); }
@Test public void testInitTableMapperJob2() throws Exception { Configuration configuration = new Configuration(); Job job = new Job(configuration, "tableName"); TableMapReduceUtil.initTableMapperJob(Bytes.toBytes("Table"), new Scan(), Import.Importer.class, Text.class, Text.class, job, false, WALInputFormat.class); assertEquals(WALInputFormat.class, job.getInputFormatClass()); assertEquals(Import.Importer.class, job.getMapperClass()); assertEquals(LongWritable.class, job.getOutputKeyClass()); assertEquals(Text.class, job.getOutputValueClass()); assertNull(job.getCombinerClass()); assertEquals("Table", job.getConfiguration().get(TableInputFormat.INPUT_TABLE)); }
@Test public void testInitTableMapperJob3() throws Exception { Configuration configuration = new Configuration(); Job job = new Job(configuration, "tableName"); TableMapReduceUtil.initTableMapperJob(Bytes.toBytes("Table"), new Scan(), Import.Importer.class, Text.class, Text.class, job); assertEquals(TableInputFormat.class, job.getInputFormatClass()); assertEquals(Import.Importer.class, job.getMapperClass()); assertEquals(LongWritable.class, job.getOutputKeyClass()); assertEquals(Text.class, job.getOutputValueClass()); assertNull(job.getCombinerClass()); assertEquals("Table", job.getConfiguration().get(TableInputFormat.INPUT_TABLE)); }
@Test public void testInitTableMapperJob4() throws Exception { Configuration configuration = new Configuration(); Job job = new Job(configuration, "tableName"); TableMapReduceUtil.initTableMapperJob(Bytes.toBytes("Table"), new Scan(), Import.Importer.class, Text.class, Text.class, job, false); assertEquals(TableInputFormat.class, job.getInputFormatClass()); assertEquals(Import.Importer.class, job.getMapperClass()); assertEquals(LongWritable.class, job.getOutputKeyClass()); assertEquals(Text.class, job.getOutputValueClass()); assertNull(job.getCombinerClass()); assertEquals("Table", job.getConfiguration().get(TableInputFormat.INPUT_TABLE)); } |
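The overloads exercised by the tests above wire TableInputFormat, the mapper class, the serialized Scan, and the map output types onto a Job in a single call; the tests pass addDependencyJars=false only so that no jar resolution happens in their local environment. A minimal sketch of typical usage, assuming a placeholder table named "my_table" and a hypothetical row-counting mapper (neither comes from the entry above):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.TableMapReduceUtil;
import org.apache.hadoop.hbase.mapreduce.TableMapper;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;

public class RowCountSketch {
  // Hypothetical mapper: emits one count per scanned row.
  static class CountMapper extends TableMapper<Text, IntWritable> {
    @Override
    protected void map(ImmutableBytesWritable key, Result value, Context context)
        throws java.io.IOException, InterruptedException {
      context.write(new Text("rows"), new IntWritable(1));
    }
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Job job = Job.getInstance(conf, "row-count-sketch");
    job.setJarByClass(RowCountSketch.class);
    Scan scan = new Scan();
    scan.setCaching(500);        // larger scanner caching is usual for MR scans
    scan.setCacheBlocks(false);  // avoid polluting the region server block cache
    // One call sets TableInputFormat, INPUT_TABLE, the scan, the mapper, and its output types.
    TableMapReduceUtil.initTableMapperJob("my_table", scan,
        CountMapper.class, Text.class, IntWritable.class, job);
    job.setNumReduceTasks(0);
    job.setOutputFormatClass(NullOutputFormat.class);
    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }
}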
FSHDFSUtils extends FSUtils { boolean recoverLease(final DistributedFileSystem dfs, final int nbAttempt, final Path p, final long startWaiting) throws FileNotFoundException { boolean recovered = false; try { recovered = dfs.recoverLease(p); LOG.info((recovered? "Recovered lease, ": "Failed to recover lease, ") + getLogMessageDetail(nbAttempt, p, startWaiting)); } catch (IOException e) { if (e instanceof LeaseExpiredException && e.getMessage().contains("File does not exist")) { throw new FileNotFoundException("The given WAL wasn't found at " + p); } else if (e instanceof FileNotFoundException) { throw (FileNotFoundException)e; } LOG.warn(getLogMessageDetail(nbAttempt, p, startWaiting), e); } return recovered; } static boolean isSameHdfs(Configuration conf, FileSystem srcFs, FileSystem desFs); @Override void recoverFileLease(final FileSystem fs, final Path p,
Configuration conf, CancelableProgressable reporter); } | @Test (timeout = 30000) public void testRecoverLease() throws IOException { HTU.getConfiguration().setInt("hbase.lease.recovery.dfs.timeout", 1000); CancelableProgressable reporter = Mockito.mock(CancelableProgressable.class); Mockito.when(reporter.progress()).thenReturn(true); DistributedFileSystem dfs = Mockito.mock(DistributedFileSystem.class); Mockito.when(dfs.recoverLease(FILE)). thenReturn(false).thenReturn(false).thenReturn(false).thenReturn(false).thenReturn(true); assertTrue(this.fsHDFSUtils.recoverDFSFileLease(dfs, FILE, HTU.getConfiguration(), reporter)); Mockito.verify(dfs, Mockito.times(5)).recoverLease(FILE); assertTrue((EnvironmentEdgeManager.currentTime() - this.startTime) > (3 * HTU.getConfiguration().getInt("hbase.lease.recovery.dfs.timeout", 61000))); } |
HFileOutputFormat extends FileOutputFormat<ImmutableBytesWritable, KeyValue> { @Override public RecordWriter<ImmutableBytesWritable, KeyValue> getRecordWriter( final TaskAttemptContext context) throws IOException, InterruptedException { return HFileOutputFormat2.createRecordWriter(context); } @Override RecordWriter<ImmutableBytesWritable, KeyValue> getRecordWriter(
final TaskAttemptContext context); static void configureIncrementalLoad(Job job, HTable table); static final String DATABLOCK_ENCODING_OVERRIDE_CONF_KEY; } | @Test public void test_LATEST_TIMESTAMP_isReplaced() throws Exception { Configuration conf = new Configuration(this.util.getConfiguration()); RecordWriter<ImmutableBytesWritable, KeyValue> writer = null; TaskAttemptContext context = null; Path dir = util.getDataTestDir("test_LATEST_TIMESTAMP_isReplaced"); try { Job job = new Job(conf); FileOutputFormat.setOutputPath(job, dir); context = createTestTaskAttemptContext(job); HFileOutputFormat hof = new HFileOutputFormat(); writer = hof.getRecordWriter(context); final byte [] b = Bytes.toBytes("b"); KeyValue kv = new KeyValue(b, b, b); KeyValue original = kv.clone(); writer.write(new ImmutableBytesWritable(), kv); assertFalse(original.equals(kv)); assertTrue(Bytes.equals(original.getRow(), kv.getRow())); assertTrue(CellUtil.matchingColumn(original, kv.getFamily(), kv.getQualifier())); assertNotSame(original.getTimestamp(), kv.getTimestamp()); assertNotSame(HConstants.LATEST_TIMESTAMP, kv.getTimestamp()); kv = new KeyValue(b, b, b, kv.getTimestamp() - 1, b); original = kv.clone(); writer.write(new ImmutableBytesWritable(), kv); assertTrue(original.equals(kv)); } finally { if (writer != null && context != null) writer.close(context); dir.getFileSystem(conf).delete(dir, true); } }
@Test public void test_TIMERANGE() throws Exception { Configuration conf = new Configuration(this.util.getConfiguration()); RecordWriter<ImmutableBytesWritable, KeyValue> writer = null; TaskAttemptContext context = null; Path dir = util.getDataTestDir("test_TIMERANGE_present"); LOG.info("Timerange dir writing to dir: "+ dir); try { Job job = new Job(conf); FileOutputFormat.setOutputPath(job, dir); context = createTestTaskAttemptContext(job); HFileOutputFormat hof = new HFileOutputFormat(); writer = hof.getRecordWriter(context); final byte [] b = Bytes.toBytes("b"); KeyValue kv = new KeyValue(b, b, b, 2000, b); KeyValue original = kv.clone(); writer.write(new ImmutableBytesWritable(), kv); assertEquals(original,kv); kv = new KeyValue(b, b, b, 1000, b); original = kv.clone(); writer.write(new ImmutableBytesWritable(), kv); assertEquals(original, kv); writer.close(context); FileSystem fs = FileSystem.get(conf); Path attemptDirectory = hof.getDefaultWorkFile(context, "").getParent(); FileStatus[] sub1 = fs.listStatus(attemptDirectory); FileStatus[] file = fs.listStatus(sub1[0].getPath()); HFile.Reader rd = HFile.createReader(fs, file[0].getPath(), new CacheConfig(conf), conf); Map<byte[],byte[]> finfo = rd.loadFileInfo(); byte[] range = finfo.get("TIMERANGE".getBytes()); assertNotNull(range); TimeRangeTracker timeRangeTracker = new TimeRangeTracker(); Writables.copyWritable(range, timeRangeTracker); LOG.info(timeRangeTracker.getMinimumTimestamp() + "...." + timeRangeTracker.getMaximumTimestamp()); assertEquals(1000, timeRangeTracker.getMinimumTimestamp()); assertEquals(2000, timeRangeTracker.getMaximumTimestamp()); rd.close(); } finally { if (writer != null && context != null) writer.close(context); dir.getFileSystem(conf).delete(dir, true); } } |
HFileOutputFormat extends FileOutputFormat<ImmutableBytesWritable, KeyValue> { public static void configureIncrementalLoad(Job job, HTable table) throws IOException { HFileOutputFormat2.configureIncrementalLoad(job, table.getTableDescriptor(), table.getRegionLocator()); } @Override RecordWriter<ImmutableBytesWritable, KeyValue> getRecordWriter(
final TaskAttemptContext context); static void configureIncrementalLoad(Job job, HTable table); static final String DATABLOCK_ENCODING_OVERRIDE_CONF_KEY; } | @Test public void testJobConfiguration() throws Exception { Configuration conf = new Configuration(this.util.getConfiguration()); conf.set("hbase.fs.tmp.dir", util.getDataTestDir("testJobConfiguration").toString()); Job job = new Job(conf); job.setWorkingDirectory(util.getDataTestDir("testJobConfiguration")); HTableDescriptor tableDescriptor = Mockito.mock(HTableDescriptor.class); RegionLocator regionLocator = Mockito.mock(RegionLocator.class); setupMockStartKeys(regionLocator); HFileOutputFormat2.configureIncrementalLoad(job, tableDescriptor, regionLocator); assertEquals(job.getNumReduceTasks(), 4); } |
SyncTable extends Configured implements Tool { public SyncTable(Configuration conf) { super(conf); } SyncTable(Configuration conf); Job createSubmittableJob(String[] args); static void main(String[] args); @Override int run(String[] args); } | @Test public void testSyncTable() throws Exception { String sourceTableName = "testSourceTable"; String targetTableName = "testTargetTable"; Path testDir = TEST_UTIL.getDataTestDirOnTestFS("testSyncTable"); writeTestData(sourceTableName, targetTableName); hashSourceTable(sourceTableName, testDir); Counters syncCounters = syncTables(sourceTableName, targetTableName, testDir); assertEqualTables(90, sourceTableName, targetTableName); assertEquals(60, syncCounters.findCounter(Counter.ROWSWITHDIFFS).getValue()); assertEquals(10, syncCounters.findCounter(Counter.SOURCEMISSINGROWS).getValue()); assertEquals(10, syncCounters.findCounter(Counter.TARGETMISSINGROWS).getValue()); assertEquals(50, syncCounters.findCounter(Counter.SOURCEMISSINGCELLS).getValue()); assertEquals(50, syncCounters.findCounter(Counter.TARGETMISSINGCELLS).getValue()); assertEquals(20, syncCounters.findCounter(Counter.DIFFERENTCELLVALUES).getValue()); TEST_UTIL.deleteTable(sourceTableName); TEST_UTIL.deleteTable(targetTableName); TEST_UTIL.cleanupDataTestDirOnTestFS(); } |
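HashTable (earlier entry) and SyncTable are meant to be run back to back: HashTable writes batched row hashes of the source table to a directory, then SyncTable scans the target table, compares it against those hashes, and repairs the cells that differ, which is what the counters in the test above measure. A hedged sketch of that two-step workflow driven from code rather than the command line (table names, paths, and the batch size are placeholders):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.mapreduce.HashTable;
import org.apache.hadoop.hbase.mapreduce.SyncTable;

public class HashThenSyncSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Step 1: hash the source table into an HDFS directory.
    int rc = new HashTable(conf).run(new String[] {
        "--batchsize=300", "sourceTable", "/tmp/sourceTable-hashes" });
    // Step 2: compare the target table against those hashes and repair differences.
    if (rc == 0) {
      rc = new SyncTable(conf).run(new String[] {
          "/tmp/sourceTable-hashes", "sourceTable", "targetTable" });
    }
    System.exit(rc);
  }
}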
HFileOutputFormat2 extends FileOutputFormat<ImmutableBytesWritable, Cell> { @Override public RecordWriter<ImmutableBytesWritable, Cell> getRecordWriter( final TaskAttemptContext context) throws IOException, InterruptedException { return createRecordWriter(context); } @Override RecordWriter<ImmutableBytesWritable, Cell> getRecordWriter(
final TaskAttemptContext context); @Deprecated static void configureIncrementalLoad(Job job, HTable table); static void configureIncrementalLoad(Job job, Table table, RegionLocator regionLocator); static void configureIncrementalLoad(Job job, HTableDescriptor tableDescriptor,
RegionLocator regionLocator); static void configureIncrementalLoadMap(Job job, Table table); static final String DATABLOCK_ENCODING_OVERRIDE_CONF_KEY; } | @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test public void test_LATEST_TIMESTAMP_isReplaced() throws Exception { Configuration conf = new Configuration(this.util.getConfiguration()); RecordWriter<ImmutableBytesWritable, Cell> writer = null; TaskAttemptContext context = null; Path dir = util.getDataTestDir("test_LATEST_TIMESTAMP_isReplaced"); try { Job job = new Job(conf); FileOutputFormat.setOutputPath(job, dir); context = createTestTaskAttemptContext(job); HFileOutputFormat2 hof = new HFileOutputFormat2(); writer = hof.getRecordWriter(context); final byte [] b = Bytes.toBytes("b"); KeyValue kv = new KeyValue(b, b, b); KeyValue original = kv.clone(); writer.write(new ImmutableBytesWritable(), kv); assertFalse(original.equals(kv)); assertTrue(Bytes.equals(CellUtil.cloneRow(original), CellUtil.cloneRow(kv))); assertTrue(Bytes.equals(CellUtil.cloneFamily(original), CellUtil.cloneFamily(kv))); assertTrue(Bytes.equals(CellUtil.cloneQualifier(original), CellUtil.cloneQualifier(kv))); assertNotSame(original.getTimestamp(), kv.getTimestamp()); assertNotSame(HConstants.LATEST_TIMESTAMP, kv.getTimestamp()); kv = new KeyValue(b, b, b, kv.getTimestamp() - 1, b); original = kv.clone(); writer.write(new ImmutableBytesWritable(), kv); assertTrue(original.equals(kv)); } finally { if (writer != null && context != null) writer.close(context); dir.getFileSystem(conf).delete(dir, true); } }
@Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test public void test_TIMERANGE() throws Exception { Configuration conf = new Configuration(this.util.getConfiguration()); RecordWriter<ImmutableBytesWritable, Cell> writer = null; TaskAttemptContext context = null; Path dir = util.getDataTestDir("test_TIMERANGE_present"); LOG.info("Timerange dir writing to dir: "+ dir); try { Job job = new Job(conf); FileOutputFormat.setOutputPath(job, dir); context = createTestTaskAttemptContext(job); HFileOutputFormat2 hof = new HFileOutputFormat2(); writer = hof.getRecordWriter(context); final byte [] b = Bytes.toBytes("b"); KeyValue kv = new KeyValue(b, b, b, 2000, b); KeyValue original = kv.clone(); writer.write(new ImmutableBytesWritable(), kv); assertEquals(original,kv); kv = new KeyValue(b, b, b, 1000, b); original = kv.clone(); writer.write(new ImmutableBytesWritable(), kv); assertEquals(original, kv); writer.close(context); FileSystem fs = FileSystem.get(conf); Path attemptDirectory = hof.getDefaultWorkFile(context, "").getParent(); FileStatus[] sub1 = fs.listStatus(attemptDirectory); FileStatus[] file = fs.listStatus(sub1[0].getPath()); HFile.Reader rd = HFile.createReader(fs, file[0].getPath(), new CacheConfig(conf), conf); Map<byte[],byte[]> finfo = rd.loadFileInfo(); byte[] range = finfo.get("TIMERANGE".getBytes()); assertNotNull(range); TimeRangeTracker timeRangeTracker = new TimeRangeTracker(); Writables.copyWritable(range, timeRangeTracker); LOG.info(timeRangeTracker.getMinimumTimestamp() + "...." + timeRangeTracker.getMaximumTimestamp()); assertEquals(1000, timeRangeTracker.getMinimumTimestamp()); assertEquals(2000, timeRangeTracker.getMaximumTimestamp()); rd.close(); } finally { if (writer != null && context != null) writer.close(context); dir.getFileSystem(conf).delete(dir, true); } } |
HFileOutputFormat2 extends FileOutputFormat<ImmutableBytesWritable, Cell> { @Deprecated public static void configureIncrementalLoad(Job job, HTable table) throws IOException { configureIncrementalLoad(job, table.getTableDescriptor(), table.getRegionLocator()); } @Override RecordWriter<ImmutableBytesWritable, Cell> getRecordWriter(
final TaskAttemptContext context); @Deprecated static void configureIncrementalLoad(Job job, HTable table); static void configureIncrementalLoad(Job job, Table table, RegionLocator regionLocator); static void configureIncrementalLoad(Job job, HTableDescriptor tableDescriptor,
RegionLocator regionLocator); static void configureIncrementalLoadMap(Job job, Table table); static final String DATABLOCK_ENCODING_OVERRIDE_CONF_KEY; } | @Ignore("Goes zombie too frequently; needs work. See HBASE-14563") @Test public void testJobConfiguration() throws Exception { Configuration conf = new Configuration(this.util.getConfiguration()); conf.set(HConstants.TEMPORARY_FS_DIRECTORY_KEY, util.getDataTestDir("testJobConfiguration") .toString()); Job job = new Job(conf); job.setWorkingDirectory(util.getDataTestDir("testJobConfiguration")); RegionLocator regionLocator = Mockito.mock(RegionLocator.class); setupMockStartKeys(regionLocator); HFileOutputFormat2.configureIncrementalLoad(job, new HTableDescriptor(), regionLocator); assertEquals(job.getNumReduceTasks(), 4); } |
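configureIncrementalLoad() is the glue between a MapReduce job that emits Puts and the bulk-load path shown in the LoadIncrementalHFiles entries earlier: it sets HFileOutputFormat2 as the output format, chooses the sort reducer from the map output value class, and installs a total-order partitioner aligned with the table's region boundaries. A minimal end-to-end sketch under assumed inputs (table "my_table", column family "f", qualifier "q", tab-separated text input; all names, paths, and the mapper are placeholders, not from the entry):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.mapreduce.HFileOutputFormat2;
import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class BulkLoadSketch {
  // Hypothetical mapper: each input line is "rowkey<TAB>value"; emits one Put per line.
  static class LineToPutMapper extends Mapper<LongWritable, Text, ImmutableBytesWritable, Put> {
    @Override
    protected void map(LongWritable offset, Text line, Context ctx)
        throws java.io.IOException, InterruptedException {
      String[] parts = line.toString().split("\t", 2);
      byte[] row = Bytes.toBytes(parts[0]);
      Put put = new Put(row);
      put.addColumn(Bytes.toBytes("f"), Bytes.toBytes("q"), Bytes.toBytes(parts[1]));
      ctx.write(new ImmutableBytesWritable(row), put);
    }
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName name = TableName.valueOf("my_table"); // placeholder; table must already exist
    Path input = new Path(args[0]);
    Path hfileDir = new Path(args[1]);              // staging dir for the generated HFiles
    Job job = Job.getInstance(conf, "bulkload-sketch");
    job.setJarByClass(BulkLoadSketch.class);
    job.setInputFormatClass(TextInputFormat.class);
    FileInputFormat.addInputPath(job, input);
    job.setMapperClass(LineToPutMapper.class);
    job.setMapOutputKeyClass(ImmutableBytesWritable.class);
    job.setMapOutputValueClass(Put.class);          // Put => PutSortReducer is chosen below
    FileOutputFormat.setOutputPath(job, hfileDir);
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(name);
         RegionLocator locator = conn.getRegionLocator(name);
         Admin admin = conn.getAdmin()) {
      // Configures reducer, partitioner, and HFile output based on region boundaries.
      HFileOutputFormat2.configureIncrementalLoad(job, table, locator);
      if (job.waitForCompletion(true)) {
        // Hand the generated HFiles to the regions, as in the bulk-load entries above.
        new LoadIncrementalHFiles(conf).doBulkLoad(hfileDir, admin, table, locator);
      }
    }
  }
}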
JarFinder { public static String getJar(Class klass) { Preconditions.checkNotNull(klass, "klass"); ClassLoader loader = klass.getClassLoader(); if (loader != null) { String class_file = klass.getName().replaceAll("\\.", "/") + ".class"; try { for (Enumeration itr = loader.getResources(class_file); itr.hasMoreElements(); ) { URL url = (URL) itr.nextElement(); String path = url.getPath(); if (path.startsWith("file:")) { path = path.substring("file:".length()); } path = URLDecoder.decode(path, "UTF-8"); if ("jar".equals(url.getProtocol())) { path = URLDecoder.decode(path, "UTF-8"); return path.replaceAll("!.*$", ""); } else if ("file".equals(url.getProtocol())) { String klassName = klass.getName(); klassName = klassName.replace(".", "/") + ".class"; path = path.substring(0, path.length() - klassName.length()); File baseDir = new File(path); File testDir = new File(System.getProperty("test.build.dir", "target/test-dir")); testDir = testDir.getAbsoluteFile(); if (!testDir.exists()) { testDir.mkdirs(); } File tempJar = File.createTempFile("hadoop-", "", testDir); tempJar = new File(tempJar.getAbsolutePath() + ".jar"); tempJar.deleteOnExit(); createJar(baseDir, tempJar); return tempJar.getAbsolutePath(); } } } catch (IOException e) { throw new RuntimeException(e); } } return null; } static void jarDir(File dir, String relativePath, ZipOutputStream zos); static String getJar(Class klass); } | @Test public void testJar() throws Exception { String jar = JarFinder.getJar(LogFactory.class); Assert.assertTrue(new File(jar).exists()); }
@Test public void testExpandedClasspath() throws Exception { String jar = JarFinder.getJar(TestJarFinder.class); Assert.assertTrue(new File(jar).exists()); } |
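getJar() resolves a class to a jar on disk, packaging the class's directory into a temporary jar when it was loaded from an exploded classpath, which is exactly what the tests above verify; it is the same lookup that TableMapReduceUtil.addDependencyJars leans on to ship dependencies with a job. A tiny usage sketch (the import of JarFinder itself is omitted because it depends on which copy is on the classpath, e.g. the HBase mapreduce one used by these tests):

import java.io.File;
import org.apache.commons.logging.LogFactory;

public class JarFinderUsageSketch {
  public static void main(String[] args) {
    // Resolve the jar (or freshly packaged temp jar) containing commons-logging's
    // LogFactory, the same class the first test above resolves.
    String jar = JarFinder.getJar(LogFactory.class);
    System.out.println("Found: " + jar + " exists=" + new File(jar).exists());
  }
}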
CellCounter { public static void main(String[] args) throws Exception { Configuration conf = HBaseConfiguration.create(); String[] otherArgs = new GenericOptionsParser(conf, args).getRemainingArgs(); if (otherArgs.length < 2) { System.err.println("ERROR: Wrong number of parameters: " + args.length); System.err.println("Usage: CellCounter "); System.err.println(" <tablename> <outputDir> <reportSeparator> [^[regex pattern] or " + "[Prefix] for row filter]] --starttime=[starttime] --endtime=[endtime]"); System.err.println(" Note: -D properties will be applied to the conf used. "); System.err.println(" Additionally, the following SCAN properties can be specified"); System.err.println(" to get fine grained control on what is counted.."); System.err.println(" -D " + TableInputFormat.SCAN_COLUMN_FAMILY + "=<familyName>"); System.err.println(" <reportSeparator> parameter can be used to override the default report separator " + "string : used to separate the rowId/column family name and qualifier name."); System.err.println(" [^[regex pattern] or [Prefix] parameter can be used to limit the cell counter count " + "operation to a limited subset of rows from the table based on regex or prefix pattern."); System.exit(-1); } Job job = createSubmittableJob(conf, otherArgs); System.exit(job.waitForCompletion(true) ? 0 : 1); } static Job createSubmittableJob(Configuration conf, String[] args); static void main(String[] args); } | @Test (timeout=300000) public void testCellCounterMain() throws Exception { PrintStream oldPrintStream = System.err; SecurityManager SECURITY_MANAGER = System.getSecurityManager(); LauncherSecurityManager newSecurityManager= new LauncherSecurityManager(); System.setSecurityManager(newSecurityManager); ByteArrayOutputStream data = new ByteArrayOutputStream(); String[] args = {}; System.setErr(new PrintStream(data)); try { System.setErr(new PrintStream(data)); try { CellCounter.main(args); fail("should be SecurityException"); } catch (SecurityException e) { assertEquals(-1, newSecurityManager.getExitCode()); assertTrue(data.toString().contains("ERROR: Wrong number of parameters:")); assertTrue(data.toString().contains("Usage:")); } } finally { System.setErr(oldPrintStream); System.setSecurityManager(SECURITY_MANAGER); } }
@Test (timeout=300000) public void TestCellCounterWithoutOutputDir() throws Exception { PrintStream oldPrintStream = System.err; SecurityManager SECURITY_MANAGER = System.getSecurityManager(); LauncherSecurityManager newSecurityManager= new LauncherSecurityManager(); System.setSecurityManager(newSecurityManager); ByteArrayOutputStream data = new ByteArrayOutputStream(); String[] args = {"tableName"}; System.setErr(new PrintStream(data)); try { System.setErr(new PrintStream(data)); try { CellCounter.main(args); fail("should be SecurityException"); } catch (SecurityException e) { assertEquals(-1, newSecurityManager.getExitCode()); assertTrue(data.toString().contains("ERROR: Wrong number of parameters:")); assertTrue(data.toString().contains("Usage:")); } } finally { System.setErr(oldPrintStream); System.setSecurityManager(SECURITY_MANAGER); } } |
ReplicationSink { public void replicateEntries(List<WALEntry> entries, final CellScanner cells) throws IOException { if (entries.isEmpty()) return; if (cells == null) throw new NullPointerException("TODO: Add handling of null CellScanner"); try { long totalReplicated = 0; Map<TableName, Map<List<UUID>, List<Row>>> rowMap = new TreeMap<TableName, Map<List<UUID>, List<Row>>>(); for (WALEntry entry : entries) { TableName table = TableName.valueOf(entry.getKey().getTableName().toByteArray()); Cell previousCell = null; Mutation m = null; int count = entry.getAssociatedCellCount(); for (int i = 0; i < count; i++) { if (!cells.advance()) { throw new ArrayIndexOutOfBoundsException("Expected=" + count + ", index=" + i); } Cell cell = cells.current(); if (isNewRowOrType(previousCell, cell)) { m = CellUtil.isDelete(cell)? new Delete(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()): new Put(cell.getRowArray(), cell.getRowOffset(), cell.getRowLength()); List<UUID> clusterIds = new ArrayList<UUID>(); for(HBaseProtos.UUID clusterId : entry.getKey().getClusterIdsList()){ clusterIds.add(toUUID(clusterId)); } m.setClusterIds(clusterIds); addToHashMultiMap(rowMap, table, clusterIds, m); } if (CellUtil.isDelete(cell)) { ((Delete)m).addDeleteMarker(cell); } else { ((Put)m).add(cell); } previousCell = cell; } totalReplicated++; } for (Entry<TableName, Map<List<UUID>,List<Row>>> entry : rowMap.entrySet()) { batch(entry.getKey(), entry.getValue().values()); } int size = entries.size(); this.metrics.setAgeOfLastAppliedOp(entries.get(size - 1).getKey().getWriteTime()); this.metrics.applyBatch(size); this.totalReplicatedEdits.addAndGet(totalReplicated); } catch (IOException ex) { LOG.error("Unable to accept edit because:", ex); throw ex; } } ReplicationSink(Configuration conf, Stoppable stopper); void replicateEntries(List<WALEntry> entries, final CellScanner cells); void stopReplicationSinkServices(); String getStats(); MetricsSink getSinkMetrics(); } | @Test public void testBatchSink() throws Exception { List<WALEntry> entries = new ArrayList<WALEntry>(BATCH_SIZE); List<Cell> cells = new ArrayList<Cell>(); for(int i = 0; i < BATCH_SIZE; i++) { entries.add(createEntry(TABLE_NAME1, i, KeyValue.Type.Put, cells)); } SINK.replicateEntries(entries, CellUtil.createCellScanner(cells.iterator())); Scan scan = new Scan(); ResultScanner scanRes = table1.getScanner(scan); assertEquals(BATCH_SIZE, scanRes.next(BATCH_SIZE).length); }
@Test public void testMixedPutDelete() throws Exception { List<WALEntry> entries = new ArrayList<WALEntry>(BATCH_SIZE/2); List<Cell> cells = new ArrayList<Cell>(); for(int i = 0; i < BATCH_SIZE/2; i++) { entries.add(createEntry(TABLE_NAME1, i, KeyValue.Type.Put, cells)); } SINK.replicateEntries(entries, CellUtil.createCellScanner(cells)); entries = new ArrayList<WALEntry>(BATCH_SIZE); cells = new ArrayList<Cell>(); for(int i = 0; i < BATCH_SIZE; i++) { entries.add(createEntry(TABLE_NAME1, i, i % 2 != 0 ? KeyValue.Type.Put: KeyValue.Type.DeleteColumn, cells)); } SINK.replicateEntries(entries, CellUtil.createCellScanner(cells.iterator())); Scan scan = new Scan(); ResultScanner scanRes = table1.getScanner(scan); assertEquals(BATCH_SIZE/2, scanRes.next(BATCH_SIZE).length); }
@Test public void testMixedPutTables() throws Exception { List<WALEntry> entries = new ArrayList<WALEntry>(BATCH_SIZE/2); List<Cell> cells = new ArrayList<Cell>(); for(int i = 0; i < BATCH_SIZE; i++) { entries.add(createEntry( i % 2 == 0 ? TABLE_NAME2 : TABLE_NAME1, i, KeyValue.Type.Put, cells)); } SINK.replicateEntries(entries, CellUtil.createCellScanner(cells.iterator())); Scan scan = new Scan(); ResultScanner scanRes = table2.getScanner(scan); for(Result res : scanRes) { assertTrue(Bytes.toInt(res.getRow()) % 2 == 0); } }
@Test public void testMixedDeletes() throws Exception { List<WALEntry> entries = new ArrayList<WALEntry>(3); List<Cell> cells = new ArrayList<Cell>(); for(int i = 0; i < 3; i++) { entries.add(createEntry(TABLE_NAME1, i, KeyValue.Type.Put, cells)); } SINK.replicateEntries(entries, CellUtil.createCellScanner(cells.iterator())); entries = new ArrayList<WALEntry>(3); cells = new ArrayList<Cell>(); entries.add(createEntry(TABLE_NAME1, 0, KeyValue.Type.DeleteColumn, cells)); entries.add(createEntry(TABLE_NAME1, 1, KeyValue.Type.DeleteFamily, cells)); entries.add(createEntry(TABLE_NAME1, 2, KeyValue.Type.DeleteColumn, cells)); SINK.replicateEntries(entries, CellUtil.createCellScanner(cells.iterator())); Scan scan = new Scan(); ResultScanner scanRes = table1.getScanner(scan); assertEquals(0, scanRes.next(3).length); }
@Test public void testApplyDeleteBeforePut() throws Exception { List<WALEntry> entries = new ArrayList<WALEntry>(5); List<Cell> cells = new ArrayList<Cell>(); for(int i = 0; i < 2; i++) { entries.add(createEntry(TABLE_NAME1, i, KeyValue.Type.Put, cells)); } entries.add(createEntry(TABLE_NAME1, 1, KeyValue.Type.DeleteFamily, cells)); for(int i = 3; i < 5; i++) { entries.add(createEntry(TABLE_NAME1, i, KeyValue.Type.Put, cells)); } SINK.replicateEntries(entries, CellUtil.createCellScanner(cells.iterator())); Get get = new Get(Bytes.toBytes(1)); Result res = table1.get(get); assertEquals(0, res.size()); } |
ReplicationSourceManager implements ReplicationListener { protected void init() throws IOException, ReplicationException { for (String id : this.replicationPeers.getPeerIds()) { addSource(id); } List<String> currentReplicators = this.replicationQueues.getListOfReplicators(); if (currentReplicators == null || currentReplicators.size() == 0) { return; } List<String> otherRegionServers = replicationTracker.getListOfRegionServers(); LOG.info("Current list of replicators: " + currentReplicators + " other RSs: " + otherRegionServers); for (String rs : currentReplicators) { if (!otherRegionServers.contains(rs)) { transferQueues(rs); } } } ReplicationSourceManager(final ReplicationQueues replicationQueues,
final ReplicationPeers replicationPeers, final ReplicationTracker replicationTracker,
final Configuration conf, final Server server, final FileSystem fs, final Path logDir,
final Path oldLogDir, final UUID clusterId); void logPositionAndCleanOldLogs(Path log, String id, long position,
boolean queueRecovered, boolean holdLogInZK); void cleanOldLogs(String key, String id, boolean queueRecovered); void deleteSource(String peerId, boolean closeConnection); void join(); List<ReplicationSourceInterface> getSources(); List<ReplicationSourceInterface> getOldSources(); void closeRecoveredQueue(ReplicationSourceInterface src); void removePeer(String id); @Override void regionServerRemoved(String regionserver); @Override void peerRemoved(String peerId); @Override void peerListChanged(List<String> peerIds); Path getOldLogDir(); Path getLogDir(); FileSystem getFs(); String getStats(); } | @Test public void testClaimQueues() throws Exception { LOG.debug("testNodeFailoverWorkerCopyQueuesFromRSUsingMulti"); conf.setBoolean(HConstants.ZOOKEEPER_USEMULTI, true); final Server server = new DummyServer("hostname0.example.org"); ReplicationQueues rq = ReplicationFactory.getReplicationQueues(server.getZooKeeper(), server.getConfiguration(), server); rq.init(server.getServerName().toString()); files.add("log1"); files.add("log2"); for (String file : files) { rq.addLog("1", file); } Server s1 = new DummyServer("dummyserver1.example.org"); Server s2 = new DummyServer("dummyserver2.example.org"); Server s3 = new DummyServer("dummyserver3.example.org"); DummyNodeFailoverWorker w1 = new DummyNodeFailoverWorker( server.getServerName().getServerName(), s1); DummyNodeFailoverWorker w2 = new DummyNodeFailoverWorker( server.getServerName().getServerName(), s2); DummyNodeFailoverWorker w3 = new DummyNodeFailoverWorker( server.getServerName().getServerName(), s3); latch = new CountDownLatch(3); w1.start(); w2.start(); w3.start(); int populatedMap = 0; latch.await(); populatedMap += w1.isLogZnodesMapPopulated() + w2.isLogZnodesMapPopulated() + w3.isLogZnodesMapPopulated(); assertEquals(1, populatedMap); server.abort("", null); }
@Test public void testNodeFailoverDeadServerParsing() throws Exception { LOG.debug("testNodeFailoverDeadServerParsing"); conf.setBoolean(HConstants.ZOOKEEPER_USEMULTI, true); final Server server = new DummyServer("ec2-54-234-230-108.compute-1.amazonaws.com"); ReplicationQueues repQueues = ReplicationFactory.getReplicationQueues(server.getZooKeeper(), conf, server); repQueues.init(server.getServerName().toString()); files.add("log1"); files.add("log2"); for (String file : files) { repQueues.addLog("1", file); } Server s1 = new DummyServer("ip-10-8-101-114.ec2.internal"); Server s2 = new DummyServer("ec2-107-20-52-47.compute-1.amazonaws.com"); Server s3 = new DummyServer("ec2-23-20-187-167.compute-1.amazonaws.com"); ReplicationQueues rq1 = ReplicationFactory.getReplicationQueues(s1.getZooKeeper(), s1.getConfiguration(), s1); rq1.init(s1.getServerName().toString()); SortedMap<String, SortedSet<String>> testMap = rq1.claimQueues(server.getServerName().getServerName()); ReplicationQueues rq2 = ReplicationFactory.getReplicationQueues(s2.getZooKeeper(), s2.getConfiguration(), s2); rq2.init(s2.getServerName().toString()); testMap = rq2.claimQueues(s1.getServerName().getServerName()); ReplicationQueues rq3 = ReplicationFactory.getReplicationQueues(s3.getZooKeeper(), s3.getConfiguration(), s3); rq3.init(s3.getServerName().toString()); testMap = rq3.claimQueues(s2.getServerName().getServerName()); ReplicationQueueInfo replicationQueueInfo = new ReplicationQueueInfo(testMap.firstKey()); List<String> result = replicationQueueInfo.getDeadRegionServers(); assertTrue(result.contains(server.getServerName().getServerName())); assertTrue(result.contains(s1.getServerName().getServerName())); assertTrue(result.contains(s2.getServerName().getServerName())); server.abort("", null); }
@Test public void testFailoverDeadServerCversionChange() throws Exception { LOG.debug("testFailoverDeadServerCversionChange"); conf.setBoolean(HConstants.ZOOKEEPER_USEMULTI, true); final Server s0 = new DummyServer("cversion-change0.example.org"); ReplicationQueues repQueues = ReplicationFactory.getReplicationQueues(s0.getZooKeeper(), conf, s0); repQueues.init(s0.getServerName().toString()); files.add("log1"); files.add("log2"); for (String file : files) { repQueues.addLog("1", file); } Server s1 = new DummyServer("cversion-change1.example.org"); ReplicationQueues rq1 = ReplicationFactory.getReplicationQueues(s1.getZooKeeper(), s1.getConfiguration(), s1); rq1.init(s1.getServerName().toString()); ReplicationQueuesClient client = ReplicationFactory.getReplicationQueuesClient(s1.getZooKeeper(), s1.getConfiguration(), s1); int v0 = client.getQueuesZNodeCversion(); rq1.claimQueues(s0.getServerName().getServerName()); int v1 = client.getQueuesZNodeCversion(); assertEquals(v0 + 1, v1); s0.abort("", null); } |
FSHDFSUtils extends FSUtils { public static boolean isSameHdfs(Configuration conf, FileSystem srcFs, FileSystem desFs) { String srcServiceName = srcFs.getCanonicalServiceName(); String desServiceName = desFs.getCanonicalServiceName(); if (srcServiceName == null || desServiceName == null) { return false; } if (srcServiceName.equals(desServiceName)) { return true; } if (srcServiceName.startsWith("ha-hdfs") && desServiceName.startsWith("ha-hdfs")) { Collection<String> internalNameServices = conf.getTrimmedStringCollection("dfs.internal.nameservices"); if (!internalNameServices.isEmpty()) { if (internalNameServices.contains(srcServiceName.split(":")[1])) { return true; } else { return false; } } } if (srcFs instanceof DistributedFileSystem && desFs instanceof DistributedFileSystem) { Set<InetSocketAddress> srcAddrs = getNNAddresses((DistributedFileSystem) srcFs, conf); Set<InetSocketAddress> desAddrs = getNNAddresses((DistributedFileSystem) desFs, conf); if (Sets.intersection(srcAddrs, desAddrs).size() > 0) { return true; } } return false; } static boolean isSameHdfs(Configuration conf, FileSystem srcFs, FileSystem desFs); @Override void recoverFileLease(final FileSystem fs, final Path p,
Configuration conf, CancelableProgressable reporter); } | @Test public void testIsSameHdfs() throws IOException { try { Class dfsUtilClazz = Class.forName("org.apache.hadoop.hdfs.DFSUtil"); dfsUtilClazz.getMethod("getNNServiceRpcAddresses", Configuration.class); } catch (Exception e) { LOG.info("Skip testIsSameHdfs test case because of the no-HA hadoop version."); return; } Configuration conf = HBaseConfiguration.create(); Path srcPath = new Path("hdfs: Path desPath = new Path("hdfs: FileSystem srcFs = srcPath.getFileSystem(conf); FileSystem desFs = desPath.getFileSystem(conf); assertTrue(FSHDFSUtils.isSameHdfs(conf, srcFs, desFs)); desPath = new Path("hdfs: desFs = desPath.getFileSystem(conf); assertTrue(!FSHDFSUtils.isSameHdfs(conf, srcFs, desFs)); desPath = new Path("hdfs: desFs = desPath.getFileSystem(conf); assertTrue(!FSHDFSUtils.isSameHdfs(conf, srcFs, desFs)); conf.set("fs.defaultFS", "hdfs: conf.set("dfs.nameservices", "haosong-hadoop"); conf.set("dfs.ha.namenodes.haosong-hadoop", "nn1,nn2"); conf.set("dfs.client.failover.proxy.provider.haosong-hadoop", "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider"); conf.set("dfs.namenode.rpc-address.haosong-hadoop.nn1", "127.0.0.1:8020"); conf.set("dfs.namenode.rpc-address.haosong-hadoop.nn2", "127.10.2.1:8000"); desPath = new Path("/"); desFs = desPath.getFileSystem(conf); assertTrue(FSHDFSUtils.isSameHdfs(conf, srcFs, desFs)); conf.set("dfs.namenode.rpc-address.haosong-hadoop.nn1", "127.10.2.1:8020"); conf.set("dfs.namenode.rpc-address.haosong-hadoop.nn2", "127.0.0.1:8000"); desPath = new Path("/"); desFs = desPath.getFileSystem(conf); assertTrue(!FSHDFSUtils.isSameHdfs(conf, srcFs, desFs)); } |
ReplicationSinkManager { public synchronized void chooseSinks() { List<ServerName> slaveAddresses = endpoint.getRegionServers(); Collections.shuffle(slaveAddresses, random); int numSinks = (int) Math.ceil(slaveAddresses.size() * ratio); sinks = slaveAddresses.subList(0, numSinks); lastUpdateToPeers = System.currentTimeMillis(); badReportCounts.clear(); } ReplicationSinkManager(HConnection conn, String peerClusterId,
HBaseReplicationEndpoint endpoint, Configuration conf); synchronized SinkPeer getReplicationSink(); synchronized void reportBadSink(SinkPeer sinkPeer); synchronized void reportSinkSuccess(SinkPeer sinkPeer); synchronized void chooseSinks(); synchronized int getNumSinks(); } | @Test public void testChooseSinks() { List<ServerName> serverNames = Lists.newArrayList(); for (int i = 0; i < 20; i++) { serverNames.add(mock(ServerName.class)); } when(replicationEndpoint.getRegionServers()) .thenReturn(serverNames); sinkManager.chooseSinks(); assertEquals(2, sinkManager.getNumSinks()); } |
ReplicationSinkManager { public synchronized void reportBadSink(SinkPeer sinkPeer) { ServerName serverName = sinkPeer.getServerName(); int badReportCount = (badReportCounts.containsKey(serverName) ? badReportCounts.get(serverName) : 0) + 1; badReportCounts.put(serverName, badReportCount); if (badReportCount > badSinkThreshold) { this.sinks.remove(serverName); if (sinks.isEmpty()) { chooseSinks(); } } } ReplicationSinkManager(HConnection conn, String peerClusterId,
HBaseReplicationEndpoint endpoint, Configuration conf); synchronized SinkPeer getReplicationSink(); synchronized void reportBadSink(SinkPeer sinkPeer); synchronized void reportSinkSuccess(SinkPeer sinkPeer); synchronized void chooseSinks(); synchronized int getNumSinks(); } | @Test public void testReportBadSink() { ServerName serverNameA = mock(ServerName.class); ServerName serverNameB = mock(ServerName.class); when(replicationEndpoint.getRegionServers()) .thenReturn(Lists.newArrayList(serverNameA, serverNameB)); sinkManager.chooseSinks(); assertEquals(1, sinkManager.getNumSinks()); SinkPeer sinkPeer = new SinkPeer(serverNameA, mock(AdminService.BlockingInterface.class)); sinkManager.reportBadSink(sinkPeer); assertEquals(1, sinkManager.getNumSinks()); } |
ConfigurationManager { public void notifyAllObservers(Configuration conf) { LOG.info("Starting to notify all observers that config changed."); synchronized (configurationObservers) { for (ConfigurationObserver observer : configurationObservers) { try { if (observer != null) { observer.onConfigurationChange(conf); } } catch (Throwable t) { LOG.error("Encountered a throwable while notifying observers: " + " of type : " + observer.getClass().getCanonicalName() + "(" + observer + ")", t); } } } } void registerObserver(ConfigurationObserver observer); void deregisterObserver(ConfigurationObserver observer); void notifyAllObservers(Configuration conf); int getNumObservers(); } | @Test public void testCheckIfObserversNotified() { Configuration conf = new Configuration(); ConfigurationManager cm = new ConfigurationManager(); DummyConfigurationObserver d1 = new DummyConfigurationObserver(cm); cm.notifyAllObservers(conf); assertTrue(d1.wasNotifiedOnChange()); d1.resetNotifiedOnChange(); DummyConfigurationObserver d2 = new DummyConfigurationObserver(cm); cm.notifyAllObservers(conf); assertTrue(d1.wasNotifiedOnChange()); d1.resetNotifiedOnChange(); assertTrue(d2.wasNotifiedOnChange()); d2.resetNotifiedOnChange(); d2.deregister(); cm.notifyAllObservers(conf); assertTrue(d1.wasNotifiedOnChange()); d1.resetNotifiedOnChange(); assertFalse(d2.wasNotifiedOnChange()); } |
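ConfigurationManager above is a plain observer registry: registerObserver adds a listener, and notifyAllObservers fans the new Configuration out to every registered onConfigurationChange, swallowing per-observer throwables. A minimal sketch follows; it is placed in org.apache.hadoop.hbase.conf, which is assumed to be the package of ConfigurationManager and ConfigurationObserver, and the property value is hypothetical.

package org.apache.hadoop.hbase.conf;

import org.apache.hadoop.conf.Configuration;

public class ObserverSketch {
  public static void main(String[] args) {
    ConfigurationManager cm = new ConfigurationManager();

    // Anonymous observer invoked by notifyAllObservers below.
    ConfigurationObserver printer = new ConfigurationObserver() {
      @Override
      public void onConfigurationChange(Configuration conf) {
        System.out.println("reloaded hbase.rootdir=" + conf.get("hbase.rootdir"));
      }
    };
    cm.registerObserver(printer);

    Configuration conf = new Configuration();
    conf.set("hbase.rootdir", "file:///tmp/hbase-sketch"); // hypothetical value
    cm.notifyAllObservers(conf);                           // printer runs once

    cm.deregisterObserver(printer);                        // no further callbacks
    System.out.println("observers left: " + cm.getNumObservers());
  }
}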
DefaultWALProvider implements WALProvider { public static ServerName getServerNameFromWALDirectoryName(Configuration conf, String path) throws IOException { if (path == null || path.length() <= HConstants.HREGION_LOGDIR_NAME.length()) { return null; } if (conf == null) { throw new IllegalArgumentException("parameter conf must be set"); } final String rootDir = conf.get(HConstants.HBASE_DIR); if (rootDir == null || rootDir.isEmpty()) { throw new IllegalArgumentException(HConstants.HBASE_DIR + " key not found in conf."); } final StringBuilder startPathSB = new StringBuilder(rootDir); if (!rootDir.endsWith("/")) startPathSB.append('/'); startPathSB.append(HConstants.HREGION_LOGDIR_NAME); if (!HConstants.HREGION_LOGDIR_NAME.endsWith("/")) startPathSB.append('/'); final String startPath = startPathSB.toString(); String fullPath; try { fullPath = FileSystem.get(conf).makeQualified(new Path(path)).toString(); } catch (IllegalArgumentException e) { LOG.info("Call to makeQualified failed on " + path + " " + e.getMessage()); return null; } if (!fullPath.startsWith(startPath)) { return null; } final String serverNameAndFile = fullPath.substring(startPath.length()); if (serverNameAndFile.indexOf('/') < "a,0,0".length()) { return null; } Path p = new Path(path); return getServerNameFromWALDirectoryName(p); } @Override void init(final WALFactory factory, final Configuration conf,
final List<WALActionsListener> listeners, String providerId); @Override WAL getWAL(final byte[] identifier); @Override void close(); @Override void shutdown(); static long getNumLogFiles(WALFactory walFactory); static long getLogFileSize(WALFactory walFactory); @VisibleForTesting static int getNumRolledLogFiles(WAL wal); @VisibleForTesting static Path getCurrentFileName(final WAL wal); @VisibleForTesting static long extractFileNumFromWAL(final WAL wal); static boolean validateWALFilename(String filename); static String getWALDirectoryName(final String serverName); static ServerName getServerNameFromWALDirectoryName(Configuration conf, String path); static ServerName getServerNameFromWALDirectoryName(Path logFile); static boolean isMetaFile(Path p); static boolean isMetaFile(String p); static Writer createWriter(final Configuration conf, final FileSystem fs, final Path path,
final boolean overwritable); static String getWALPrefixFromWALName(String name); static final String WAL_FILE_NAME_DELIMITER; @VisibleForTesting
static final String META_WAL_PROVIDER_ID; static final String SPLITTING_EXT; } | @Test public void testGetServerNameFromWALDirectoryName() throws IOException { ServerName sn = ServerName.valueOf("hn", 450, 1398); String hl = FSUtils.getRootDir(conf) + "/" + DefaultWALProvider.getWALDirectoryName(sn.toString()); assertNull(DefaultWALProvider.getServerNameFromWALDirectoryName(conf, null)); assertNull(DefaultWALProvider.getServerNameFromWALDirectoryName(conf, FSUtils.getRootDir(conf).toUri().toString())); assertNull(DefaultWALProvider.getServerNameFromWALDirectoryName(conf, "")); assertNull(DefaultWALProvider.getServerNameFromWALDirectoryName(conf, " ")); assertNull(DefaultWALProvider.getServerNameFromWALDirectoryName(conf, hl)); assertNull(DefaultWALProvider.getServerNameFromWALDirectoryName(conf, hl + "qdf")); assertNull(DefaultWALProvider.getServerNameFromWALDirectoryName(conf, "sfqf" + hl + "qdf")); final String wals = "/WALs/"; ServerName parsed = DefaultWALProvider.getServerNameFromWALDirectoryName(conf, FSUtils.getRootDir(conf).toUri().toString() + wals + sn + "/localhost%2C32984%2C1343316388997.1343316390417"); assertEquals("standard", sn, parsed); parsed = DefaultWALProvider.getServerNameFromWALDirectoryName(conf, hl + "/qdf"); assertEquals("subdir", sn, parsed); parsed = DefaultWALProvider.getServerNameFromWALDirectoryName(conf, FSUtils.getRootDir(conf).toUri().toString() + wals + sn + "-splitting/localhost%3A57020.1340474893931"); assertEquals("split", sn, parsed); } |
BoundedRegionGroupingProvider extends RegionGroupingProvider { @Override public void close() throws IOException { IOException failure = null; for (WALProvider provider : delegates) { try { provider.close(); } catch (IOException exception) { LOG.error("Problem closing provider '" + provider + "': " + exception.getMessage()); LOG.debug("Details of problem shutting down provider '" + provider + "'", exception); failure = exception; } } if (failure != null) { throw failure; } } @Override void init(final WALFactory factory, final Configuration conf,
final List<WALActionsListener> listeners, final String providerId); @Override void shutdown(); @Override void close(); static long getNumLogFiles(WALFactory walFactory); static long getLogFileSize(WALFactory walFactory); } | @Test public void setMembershipDedups() throws IOException { final int temp = conf.getInt(NUM_REGION_GROUPS, DEFAULT_NUM_REGION_GROUPS); WALFactory wals = null; try { conf.setInt(NUM_REGION_GROUPS, temp*4); FSUtils.setRootDir(conf, TEST_UTIL.getDataTestDirOnTestFS()); wals = new WALFactory(conf, null, currentTest.getMethodName()); final Set<WAL> seen = new HashSet<WAL>(temp*4); final Random random = new Random(); int count = 0; for (int i = 0; i < temp*8; i++) { final WAL maybeNewWAL = wals.getWAL(Bytes.toBytes(random.nextInt())); LOG.info("Iteration " + i + ", checking wal " + maybeNewWAL); if (seen.add(maybeNewWAL)) { count++; } } assertEquals("received back a different number of WALs that are not equal() to each other " + "than the bound we placed.", temp*4, count); } finally { if (wals != null) { wals.close(); } conf.setInt(NUM_REGION_GROUPS, temp); } } |
WALFactory { public WAL getWAL(final byte[] identifier) throws IOException { return provider.getWAL(identifier); } private WALFactory(Configuration conf); WALFactory(final Configuration conf, final List<WALActionsListener> listeners,
final String factoryId); void close(); void shutdown(); WAL getWAL(final byte[] identifier); WAL getMetaWAL(final byte[] identifier); Reader createReader(final FileSystem fs, final Path path); Reader createReader(final FileSystem fs, final Path path,
CancelableProgressable reporter); Reader createReader(final FileSystem fs, final Path path,
CancelableProgressable reporter, boolean allowCustom); Writer createWALWriter(final FileSystem fs, final Path path); @VisibleForTesting Writer createRecoveredEditsWriter(final FileSystem fs, final Path path); static WALFactory getInstance(Configuration configuration); static Reader createReader(final FileSystem fs, final Path path,
final Configuration configuration); static Reader createReaderIgnoreCustomClass(final FileSystem fs, final Path path,
final Configuration configuration); @VisibleForTesting static Writer createWALWriter(final FileSystem fs, final Path path,
final Configuration configuration); static final String WAL_PROVIDER; } | @Test public void testVisitors() throws Exception { final int COL_COUNT = 10; final TableName tableName = TableName.valueOf("tablename"); final byte [] row = Bytes.toBytes("row"); final DumbWALActionsListener visitor = new DumbWALActionsListener(); final MultiVersionConcurrencyControl mvcc = new MultiVersionConcurrencyControl(1); long timestamp = System.currentTimeMillis(); HTableDescriptor htd = new HTableDescriptor(); htd.addFamily(new HColumnDescriptor("column")); HRegionInfo hri = new HRegionInfo(tableName, HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW); final WAL log = wals.getWAL(hri.getEncodedNameAsBytes()); log.registerWALActionsListener(visitor); for (int i = 0; i < COL_COUNT; i++) { WALEdit cols = new WALEdit(); cols.add(new KeyValue(row, Bytes.toBytes("column"), Bytes.toBytes(Integer.toString(i)), timestamp, new byte[]{(byte) (i + '0')})); log.append(htd, hri, new WALKey(hri.getEncodedNameAsBytes(), tableName, System.currentTimeMillis(), mvcc), cols, true); } log.sync(); assertEquals(COL_COUNT, visitor.increments); log.unregisterWALActionsListener(visitor); WALEdit cols = new WALEdit(); cols.add(new KeyValue(row, Bytes.toBytes("column"), Bytes.toBytes(Integer.toString(11)), timestamp, new byte[]{(byte) (11 + '0')})); log.append(htd, hri, new WALKey(hri.getEncodedNameAsBytes(), tableName, System.currentTimeMillis(), mvcc), cols, true); log.sync(); assertEquals(COL_COUNT, visitor.increments); }
@Test public void testWALCoprocessorLoaded() throws Exception { WALCoprocessorHost host = wals.getWAL(UNSPECIFIED_REGION).getCoprocessorHost(); Coprocessor c = host.findCoprocessor(SampleRegionWALObserver.class.getName()); assertNotNull(c); } |
FSTableDescriptors implements TableDescriptors { @VisibleForTesting static int getTableInfoSequenceId(final Path p) { if (p == null) return 0; Matcher m = TABLEINFO_FILE_REGEX.matcher(p.getName()); if (!m.matches()) throw new IllegalArgumentException(p.toString()); String suffix = m.group(2); if (suffix == null || suffix.length() <= 0) return 0; return Integer.parseInt(m.group(2)); } FSTableDescriptors(final Configuration conf); FSTableDescriptors(final Configuration conf, final FileSystem fs, final Path rootdir); FSTableDescriptors(final Configuration conf, final FileSystem fs,
final Path rootdir, final boolean fsreadonly, final boolean usecache); void setCacheOn(); void setCacheOff(); @VisibleForTesting boolean isUsecache(); @Override HTableDescriptor get(final TableName tablename); @Override Map<String, HTableDescriptor> getAll(); @Override Map<String, HTableDescriptor> getByNamespace(String name); @Override void add(HTableDescriptor htd); @Override HTableDescriptor remove(final TableName tablename); boolean isTableInfoExists(TableName tableName); static FileStatus getTableInfoPath(FileSystem fs, Path tableDir); static HTableDescriptor getTableDescriptorFromFs(FileSystem fs,
Path hbaseRootDir, TableName tableName); static HTableDescriptor getTableDescriptorFromFs(FileSystem fs,
Path hbaseRootDir, TableName tableName, boolean rewritePb); static HTableDescriptor getTableDescriptorFromFs(FileSystem fs, Path tableDir); static HTableDescriptor getTableDescriptorFromFs(FileSystem fs, Path tableDir,
boolean rewritePb); void deleteTableDescriptorIfExists(TableName tableName); boolean createTableDescriptor(HTableDescriptor htd); boolean createTableDescriptor(HTableDescriptor htd, boolean forceCreation); boolean createTableDescriptorForTableDirectory(Path tableDir,
HTableDescriptor htd, boolean forceCreation); } | @Test (expected=IllegalArgumentException.class) public void testRegexAgainstOldStyleTableInfo() { Path p = new Path("/tmp", FSTableDescriptors.TABLEINFO_FILE_PREFIX); int i = FSTableDescriptors.getTableInfoSequenceId(p); assertEquals(0, i); p = new Path("/tmp", "abc"); FSTableDescriptors.getTableInfoSequenceId(p); } |
ZooKeeperMainServer { public String parse(final Configuration c) { return ZKConfig.getZKQuorumServersString(c); } String parse(final Configuration c); static void main(String args[]); } | @Test public void testHostPortParse() { ZooKeeperMainServer parser = new ZooKeeperMainServer(); Configuration c = HBaseConfiguration.create(); assertEquals("localhost:" + c.get(HConstants.ZOOKEEPER_CLIENT_PORT), parser.parse(c)); final String port = "1234"; c.set(HConstants.ZOOKEEPER_CLIENT_PORT, port); c.set("hbase.zookeeper.quorum", "example.com"); assertEquals("example.com:" + port, parser.parse(c)); c.set("hbase.zookeeper.quorum", "example1.com,example2.com,example3.com"); String ensemble = parser.parse(c); assertTrue(port, ensemble.matches("(example[1-3]\\.com:1234,){2}example[1-3]\\.com:" + port)); c.set("hbase.zookeeper.quorum", "example1.com:5678,example2.com:9012,example3.com:3456"); ensemble = parser.parse(c); assertEquals(ensemble, "example1.com:5678,example2.com:9012,example3.com:3456"); c.set("hbase.zookeeper.quorum", "example1.com:5678,example2.com:9012,example3.com"); ensemble = parser.parse(c); assertEquals(ensemble, "example1.com:5678,example2.com:9012,example3.com:" + port); } |
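As the test above shows, ZooKeeperMainServer.parse expands hbase.zookeeper.quorum plus hbase.zookeeper.property.clientPort into a host:port ensemble string, with per-host ports in the quorum taking precedence over the default client port. A small sketch follows; it sits in org.apache.hadoop.hbase.zookeeper (assumed to be the class's package, mirroring the test) and uses hypothetical host names.

package org.apache.hadoop.hbase.zookeeper;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;

public class ParseSketch {
  public static void main(String[] args) {
    Configuration c = HBaseConfiguration.create();
    c.set("hbase.zookeeper.quorum", "zk1.example.com,zk2.example.com:2182"); // hypothetical hosts
    c.set(HConstants.ZOOKEEPER_CLIENT_PORT, "2181");
    // Expected shape per the test above: zk1.example.com:2181,zk2.example.com:2182
    // (the explicit per-host port is kept, the default port fills in the rest)
    System.out.println(new ZooKeeperMainServer().parse(c));
  }
}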
CoprocessorHost { protected void loadSystemCoprocessors(Configuration conf, String confKey) { boolean coprocessorsEnabled = conf.getBoolean(COPROCESSORS_ENABLED_CONF_KEY, DEFAULT_COPROCESSORS_ENABLED); if (!coprocessorsEnabled) { return; } Class<?> implClass = null; String[] defaultCPClasses = conf.getStrings(confKey); if (defaultCPClasses == null || defaultCPClasses.length == 0) return; int priority = Coprocessor.PRIORITY_SYSTEM; for (String className : defaultCPClasses) { className = className.trim(); if (findCoprocessor(className) != null) { LOG.warn("Attempted duplicate loading of " + className + "; skipped"); continue; } ClassLoader cl = this.getClass().getClassLoader(); Thread.currentThread().setContextClassLoader(cl); try { implClass = cl.loadClass(className); this.coprocessors.add(loadInstance(implClass, Coprocessor.PRIORITY_SYSTEM, conf)); LOG.info("System coprocessor " + className + " was loaded " + "successfully with priority (" + priority++ + ")."); } catch (Throwable t) { abortServer(className, t); } } } CoprocessorHost(Abortable abortable); static Set<String> getLoadedCoprocessors(); Set<String> getCoprocessors(); E load(Path path, String className, int priority,
Configuration conf); void load(Class<?> implClass, int priority, Configuration conf); E loadInstance(Class<?> implClass, int priority, Configuration conf); abstract E createEnvironment(Class<?> implClass, Coprocessor instance,
int priority, int sequence, Configuration conf); void shutdown(CoprocessorEnvironment e); Coprocessor findCoprocessor(String className); List<T> findCoprocessors(Class<T> cls); CoprocessorEnvironment findCoprocessorEnvironment(String className); static final String REGION_COPROCESSOR_CONF_KEY; static final String REGIONSERVER_COPROCESSOR_CONF_KEY; static final String USER_REGION_COPROCESSOR_CONF_KEY; static final String MASTER_COPROCESSOR_CONF_KEY; static final String WAL_COPROCESSOR_CONF_KEY; static final String ABORT_ON_ERROR_KEY; static final boolean DEFAULT_ABORT_ON_ERROR; static final String COPROCESSORS_ENABLED_CONF_KEY; static final boolean DEFAULT_COPROCESSORS_ENABLED; static final String USER_COPROCESSORS_ENABLED_CONF_KEY; static final boolean DEFAULT_USER_COPROCESSORS_ENABLED; } | @Test public void testDoubleLoading() { final Configuration conf = HBaseConfiguration.create(); CoprocessorHost<CoprocessorEnvironment> host = new CoprocessorHost<CoprocessorEnvironment>(new TestAbortable()) { final Configuration cpHostConf = conf; @Override public CoprocessorEnvironment createEnvironment(Class<?> implClass, final Coprocessor instance, int priority, int sequence, Configuration conf) { return new CoprocessorEnvironment() { final Coprocessor envInstance = instance; @Override public int getVersion() { return 0; } @Override public String getHBaseVersion() { return "0.0.0"; } @Override public Coprocessor getInstance() { return envInstance; } @Override public int getPriority() { return 0; } @Override public int getLoadSequence() { return 0; } @Override public Configuration getConfiguration() { return cpHostConf; } @Override public HTableInterface getTable(TableName tableName) throws IOException { return null; } @Override public HTableInterface getTable(TableName tableName, ExecutorService service) throws IOException { return null; } @Override public ClassLoader getClassLoader() { return null; } }; } }; final String key = "KEY"; final String coprocessor = "org.apache.hadoop.hbase.coprocessor.SimpleRegionObserver"; conf.setStrings(key, coprocessor, coprocessor, coprocessor); host.loadSystemCoprocessors(conf, key); Assert.assertEquals(1, host.coprocessors.size()); } |
FSTableDescriptors implements TableDescriptors { private static String formatTableInfoSequenceId(final int number) { byte [] b = new byte[WIDTH_OF_SEQUENCE_ID]; int d = Math.abs(number); for (int i = b.length - 1; i >= 0; i--) { b[i] = (byte)((d % 10) + '0'); d /= 10; } return Bytes.toString(b); } FSTableDescriptors(final Configuration conf); FSTableDescriptors(final Configuration conf, final FileSystem fs, final Path rootdir); FSTableDescriptors(final Configuration conf, final FileSystem fs,
final Path rootdir, final boolean fsreadonly, final boolean usecache); void setCacheOn(); void setCacheOff(); @VisibleForTesting boolean isUsecache(); @Override HTableDescriptor get(final TableName tablename); @Override Map<String, HTableDescriptor> getAll(); @Override Map<String, HTableDescriptor> getByNamespace(String name); @Override void add(HTableDescriptor htd); @Override HTableDescriptor remove(final TableName tablename); boolean isTableInfoExists(TableName tableName); static FileStatus getTableInfoPath(FileSystem fs, Path tableDir); static HTableDescriptor getTableDescriptorFromFs(FileSystem fs,
Path hbaseRootDir, TableName tableName); static HTableDescriptor getTableDescriptorFromFs(FileSystem fs,
Path hbaseRootDir, TableName tableName, boolean rewritePb); static HTableDescriptor getTableDescriptorFromFs(FileSystem fs, Path tableDir); static HTableDescriptor getTableDescriptorFromFs(FileSystem fs, Path tableDir,
boolean rewritePb); void deleteTableDescriptorIfExists(TableName tableName); boolean createTableDescriptor(HTableDescriptor htd); boolean createTableDescriptor(HTableDescriptor htd, boolean forceCreation); boolean createTableDescriptorForTableDirectory(Path tableDir,
HTableDescriptor htd, boolean forceCreation); } | @Test public void testFormatTableInfoSequenceId() { Path p0 = assertWriteAndReadSequenceId(0); StringBuilder sb = new StringBuilder(); for (int i = 0; i < FSTableDescriptors.WIDTH_OF_SEQUENCE_ID; i++) { sb.append("0"); } assertEquals(FSTableDescriptors.TABLEINFO_FILE_PREFIX + "." + sb.toString(), p0.getName()); Path p2 = assertWriteAndReadSequenceId(2); Path p10000 = assertWriteAndReadSequenceId(10000); Path p = new Path(p0.getParent(), FSTableDescriptors.TABLEINFO_FILE_PREFIX); FileStatus fs = new FileStatus(0, false, 0, 0, 0, p); FileStatus fs0 = new FileStatus(0, false, 0, 0, 0, p0); FileStatus fs2 = new FileStatus(0, false, 0, 0, 0, p2); FileStatus fs10000 = new FileStatus(0, false, 0, 0, 0, p10000); Comparator<FileStatus> comparator = FSTableDescriptors.TABLEINFO_FILESTATUS_COMPARATOR; assertTrue(comparator.compare(fs, fs0) > 0); assertTrue(comparator.compare(fs0, fs2) > 0); assertTrue(comparator.compare(fs2, fs10000) > 0); } |
HttpRequestLog { public static RequestLog getRequestLog(String name) { String lookup = serverToComponent.get(name); if (lookup != null) { name = lookup; } String loggerName = "http.requests." + name; String appenderName = name + "requestlog"; Log logger = LogFactory.getLog(loggerName); if (logger instanceof Log4JLogger) { Log4JLogger httpLog4JLog = (Log4JLogger)logger; Logger httpLogger = httpLog4JLog.getLogger(); Appender appender = null; try { appender = httpLogger.getAppender(appenderName); } catch (LogConfigurationException e) { LOG.warn("Http request log for " + loggerName + " could not be created"); throw e; } if (appender == null) { LOG.info("Http request log for " + loggerName + " is not defined"); return null; } if (appender instanceof HttpRequestLogAppender) { HttpRequestLogAppender requestLogAppender = (HttpRequestLogAppender)appender; NCSARequestLog requestLog = new NCSARequestLog(); requestLog.setFilename(requestLogAppender.getFilename()); requestLog.setRetainDays(requestLogAppender.getRetainDays()); return requestLog; } else { LOG.warn("Jetty request log for " + loggerName + " was of the wrong class"); return null; } } else { LOG.warn("Jetty request log can only be enabled using Log4j"); return null; } } static RequestLog getRequestLog(String name); } | @Test public void testAppenderUndefined() { RequestLog requestLog = HttpRequestLog.getRequestLog("test"); assertNull("RequestLog should be null", requestLog); }
@Test public void testAppenderDefined() { HttpRequestLogAppender requestLogAppender = new HttpRequestLogAppender(); requestLogAppender.setName("testrequestlog"); Logger.getLogger("http.requests.test").addAppender(requestLogAppender); RequestLog requestLog = HttpRequestLog.getRequestLog("test"); Logger.getLogger("http.requests.test").removeAppender(requestLogAppender); assertNotNull("RequestLog should not be null", requestLog); assertEquals("Class mismatch", NCSARequestLog.class, requestLog.getClass()); } |
RegionPlan implements Comparable<RegionPlan> { @Override public int hashCode() { return getRegionName().hashCode(); } RegionPlan(final HRegionInfo hri, ServerName source, ServerName dest); void setDestination(ServerName dest); ServerName getSource(); ServerName getDestination(); String getRegionName(); HRegionInfo getRegionInfo(); @Override int compareTo(RegionPlan o); @Override int hashCode(); @Override boolean equals(Object obj); @Override String toString(); } | @Test public void test() { HRegionInfo hri = new HRegionInfo(TableName.valueOf("table")); ServerName source = ServerName.valueOf("source", 1234, 2345); ServerName dest = ServerName.valueOf("dest", 1234, 2345); RegionPlan plan = new RegionPlan(hri, source, dest); assertEquals(plan.hashCode(), new RegionPlan(hri, source, dest).hashCode()); assertEquals(plan, new RegionPlan(hri, source, dest)); assertEquals(plan.hashCode(), new RegionPlan(hri, dest, source).hashCode()); assertEquals(plan, new RegionPlan(hri, dest, source)); HRegionInfo other = new HRegionInfo(TableName.valueOf("other")); assertNotEquals(plan.hashCode(), new RegionPlan(other, source, dest).hashCode()); assertNotEquals(plan, new RegionPlan(other, source, dest)); } |
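RegionPlan bases hashCode and equals on the region name alone, so plans for the same region compare equal even when source and destination are swapped — exactly what the test above asserts. A small sketch of that contract follows; it is placed in org.apache.hadoop.hbase.master (the package assumed for RegionPlan, mirroring the test) and the server names are hypothetical.

package org.apache.hadoop.hbase.master;

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.TableName;

public class RegionPlanEqualitySketch {
  public static void main(String[] args) {
    HRegionInfo hri = new HRegionInfo(TableName.valueOf("demo"));
    ServerName a = ServerName.valueOf("rs-a.example.com", 16020, 1L); // hypothetical servers
    ServerName b = ServerName.valueOf("rs-b.example.com", 16020, 1L);

    RegionPlan moveAtoB = new RegionPlan(hri, a, b);
    RegionPlan moveBtoA = new RegionPlan(hri, b, a);

    // true: plan identity is the region name, not the servers involved,
    // so a pending plan can be looked up or replaced per region regardless of placement
    System.out.println(moveAtoB.equals(moveBtoA));
    System.out.println(moveAtoB.hashCode() == moveBtoA.hashCode()); // true
  }
}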
HMasterCommandLine extends ServerCommandLine { public int run(String args[]) throws Exception { Options opt = new Options(); opt.addOption("localRegionServers", true, "RegionServers to start in master process when running standalone"); opt.addOption("masters", true, "Masters to start in this process"); opt.addOption("minRegionServers", true, "Minimum RegionServers needed to host user tables"); opt.addOption("backup", false, "Do not try to become HMaster until the primary fails"); CommandLine cmd; try { cmd = new GnuParser().parse(opt, args); } catch (ParseException e) { LOG.error("Could not parse: ", e); usage(null); return 1; } if (cmd.hasOption("minRegionServers")) { String val = cmd.getOptionValue("minRegionServers"); getConf().setInt("hbase.regions.server.count.min", Integer.parseInt(val)); LOG.debug("minRegionServers set to " + val); } if (cmd.hasOption("minServers")) { String val = cmd.getOptionValue("minServers"); getConf().setInt("hbase.regions.server.count.min", Integer.parseInt(val)); LOG.debug("minServers set to " + val); } if (cmd.hasOption("backup")) { getConf().setBoolean(HConstants.MASTER_TYPE_BACKUP, true); } if (cmd.hasOption("localRegionServers")) { String val = cmd.getOptionValue("localRegionServers"); getConf().setInt("hbase.regionservers", Integer.parseInt(val)); LOG.debug("localRegionServers set to " + val); } if (cmd.hasOption("masters")) { String val = cmd.getOptionValue("masters"); getConf().setInt("hbase.masters", Integer.parseInt(val)); LOG.debug("masters set to " + val); } @SuppressWarnings("unchecked") List<String> remainingArgs = cmd.getArgList(); if (remainingArgs.size() != 1) { usage(null); return 1; } String command = remainingArgs.get(0); if ("start".equals(command)) { return startMaster(); } else if ("stop".equals(command)) { return stopMaster(); } else if ("clear".equals(command)) { return (ZNodeClearer.clear(getConf()) ? 0 : 1); } else { usage("Invalid command: " + command); return 1; } } HMasterCommandLine(Class<? extends HMaster> masterClass); int run(String args[]); } | @Test public void testRun() throws Exception { HMasterCommandLine masterCommandLine = new HMasterCommandLine(HMaster.class); masterCommandLine.setConf(TESTING_UTIL.getConfiguration()); assertEquals(0, masterCommandLine.run(new String [] {"clear"})); } |
ActiveMasterManager extends ZooKeeperListener { boolean blockUntilBecomingActiveMaster( int checkInterval, MonitoredTask startupStatus) { String backupZNode = ZKUtil.joinZNode( this.watcher.backupMasterAddressesZNode, this.sn.toString()); while (!(master.isAborted() || master.isStopped())) { startupStatus.setStatus("Trying to register in ZK as active master"); try { if (MasterAddressTracker.setMasterAddress(this.watcher, this.watcher.getMasterAddressZNode(), this.sn, infoPort)) { if (ZKUtil.checkExists(this.watcher, backupZNode) != -1) { LOG.info("Deleting ZNode for " + backupZNode + " from backup master directory"); ZKUtil.deleteNodeFailSilent(this.watcher, backupZNode); } ZNodeClearer.writeMyEphemeralNodeOnDisk(this.sn.toString()); startupStatus.setStatus("Successfully registered as active master."); this.clusterHasActiveMaster.set(true); LOG.info("Registered Active Master=" + this.sn); return true; } this.clusterHasActiveMaster.set(true); String msg; byte[] bytes = ZKUtil.getDataAndWatch(this.watcher, this.watcher.getMasterAddressZNode()); if (bytes == null) { msg = ("A master was detected, but went down before its address " + "could be read. Attempting to become the next active master"); } else { ServerName currentMaster; try { currentMaster = ServerName.parseFrom(bytes); } catch (DeserializationException e) { LOG.warn("Failed parse", e); continue; } if (ServerName.isSameHostnameAndPort(currentMaster, this.sn)) { msg = ("Current master has this master's address, " + currentMaster + "; master was restarted? Deleting node."); ZKUtil.deleteNode(this.watcher, this.watcher.getMasterAddressZNode()); ZNodeClearer.deleteMyEphemeralNodeOnDisk(); } else { msg = "Another master is the active master, " + currentMaster + "; waiting to become the next active master"; } } LOG.info(msg); startupStatus.setStatus(msg); } catch (KeeperException ke) { master.abort("Received an unexpected KeeperException, aborting", ke); return false; } synchronized (this.clusterHasActiveMaster) { while (clusterHasActiveMaster.get() && !master.isStopped()) { try { clusterHasActiveMaster.wait(checkInterval); } catch (InterruptedException e) { LOG.debug("Interrupted waiting for master to die", e); } } if (clusterShutDown.get()) { this.master.stop( "Cluster went down before this master became active"); } } } return false; } ActiveMasterManager(ZooKeeperWatcher watcher, ServerName sn, Server master); void setInfoPort(int infoPort); @Override void nodeCreated(String path); @Override void nodeDeleted(String path); void stop(); } | @Test public void testRestartMaster() throws IOException, KeeperException { ZooKeeperWatcher zk = new ZooKeeperWatcher(TEST_UTIL.getConfiguration(), "testActiveMasterManagerFromZK", null, true); try { ZKUtil.deleteNode(zk, zk.getMasterAddressZNode()); ZKUtil.deleteNode(zk, zk.clusterStateZNode); } catch(KeeperException.NoNodeException nne) {} ServerName master = ServerName.valueOf("localhost", 1, System.currentTimeMillis()); DummyMaster dummyMaster = new DummyMaster(zk,master); ClusterStatusTracker clusterStatusTracker = dummyMaster.getClusterStatusTracker(); ActiveMasterManager activeMasterManager = dummyMaster.getActiveMasterManager(); assertFalse(activeMasterManager.clusterHasActiveMaster.get()); MonitoredTask status = Mockito.mock(MonitoredTask.class); clusterStatusTracker.setClusterUp(); activeMasterManager.blockUntilBecomingActiveMaster(100, status); assertTrue(activeMasterManager.clusterHasActiveMaster.get()); assertMaster(zk, master); DummyMaster secondDummyMaster = new DummyMaster(zk,master); 
ActiveMasterManager secondActiveMasterManager = secondDummyMaster.getActiveMasterManager(); assertFalse(secondActiveMasterManager.clusterHasActiveMaster.get()); activeMasterManager.blockUntilBecomingActiveMaster(100, status); assertTrue(activeMasterManager.clusterHasActiveMaster.get()); assertMaster(zk, master); } |
ServerAndLoad implements Comparable<ServerAndLoad>, Serializable { @Override public int hashCode() { int result = load; result = 31 * result + ((sn == null) ? 0 : sn.hashCode()); return result; } ServerAndLoad(final ServerName sn, final int load); @Override int compareTo(ServerAndLoad other); @Override int hashCode(); @Override boolean equals(Object o); } | @Test public void test() { ServerName server = ServerName.valueOf("host", 12345, 112244); int startcode = 12; ServerAndLoad sal = new ServerAndLoad(server, startcode); assertEquals(sal.hashCode(), new ServerAndLoad(server, startcode).hashCode()); assertEquals(sal, new ServerAndLoad(server, startcode)); assertNotEquals(sal.hashCode(), new ServerAndLoad(server, startcode + 1).hashCode()); assertNotEquals(sal, new ServerAndLoad(server, startcode + 1)); ServerName other = ServerName.valueOf("other", 12345, 112244); assertNotEquals(sal.hashCode(), new ServerAndLoad(other, startcode).hashCode()); assertNotEquals(sal, new ServerAndLoad(other, startcode)); } |
FavoredNodeAssignmentHelper { void placePrimaryRSAsRoundRobin(Map<ServerName, List<HRegionInfo>> assignmentMap, Map<HRegionInfo, ServerName> primaryRSMap, List<HRegionInfo> regions) { List<String> rackList = new ArrayList<String>(rackToRegionServerMap.size()); rackList.addAll(rackToRegionServerMap.keySet()); int rackIndex = random.nextInt(rackList.size()); int maxRackSize = 0; for (Map.Entry<String,List<ServerName>> r : rackToRegionServerMap.entrySet()) { if (r.getValue().size() > maxRackSize) { maxRackSize = r.getValue().size(); } } int numIterations = 0; int firstServerIndex = random.nextInt(maxRackSize); int serverIndex = firstServerIndex; for (HRegionInfo regionInfo : regions) { List<ServerName> currentServerList; String rackName; while (true) { rackName = rackList.get(rackIndex); numIterations++; currentServerList = rackToRegionServerMap.get(rackName); if (serverIndex >= currentServerList.size()) { if (numIterations % rackList.size() == 0) { if (++serverIndex >= maxRackSize) serverIndex = 0; } if ((++rackIndex) >= rackList.size()) { rackIndex = 0; } } else break; } ServerName currentServer = currentServerList.get(serverIndex); primaryRSMap.put(regionInfo, currentServer); List<HRegionInfo> regionsForServer = assignmentMap.get(currentServer); if (regionsForServer == null) { regionsForServer = new ArrayList<HRegionInfo>(); assignmentMap.put(currentServer, regionsForServer); } regionsForServer.add(regionInfo); if (numIterations % rackList.size() == 0) { ++serverIndex; } if ((++rackIndex) >= rackList.size()) { rackIndex = 0; } } } FavoredNodeAssignmentHelper(final List<ServerName> servers, Configuration conf); FavoredNodeAssignmentHelper(final List<ServerName> servers,
final RackManager rackManager); static void updateMetaWithFavoredNodesInfo(
Map<HRegionInfo, List<ServerName>> regionToFavoredNodes,
Connection connection); static void updateMetaWithFavoredNodesInfo(
Map<HRegionInfo, List<ServerName>> regionToFavoredNodes,
Configuration conf); static ServerName[] getFavoredNodesList(byte[] favoredNodes); static byte[] getFavoredNodes(List<ServerName> serverAddrList); Map<HRegionInfo, ServerName[]> placeSecondaryAndTertiaryWithRestrictions(
Map<HRegionInfo, ServerName> primaryRSMap); void initialize(); static String getFavoredNodesAsString(List<ServerName> nodes); static final byte [] FAVOREDNODES_QUALIFIER; final static short FAVORED_NODES_NUM; } | @Ignore("Disabled for now until FavoredNodes gets finished as a feature") @Test public void testPlacePrimaryRSAsRoundRobin() { primaryRSPlacement(6, null, 10, 10, 10); primaryRSPlacement(600, null, 10, 10, 10); } |
FavoredNodeAssignmentHelper { Map<HRegionInfo, ServerName[]> placeSecondaryAndTertiaryRS( Map<HRegionInfo, ServerName> primaryRSMap) { Map<HRegionInfo, ServerName[]> secondaryAndTertiaryMap = new HashMap<HRegionInfo, ServerName[]>(); for (Map.Entry<HRegionInfo, ServerName> entry : primaryRSMap.entrySet()) { HRegionInfo regionInfo = entry.getKey(); ServerName primaryRS = entry.getValue(); try { ServerName[] favoredNodes; String primaryRack = rackManager.getRack(primaryRS); if (getTotalNumberOfRacks() == 1) { favoredNodes = singleRackCase(regionInfo, primaryRS, primaryRack); } else { favoredNodes = multiRackCase(regionInfo, primaryRS, primaryRack); } if (favoredNodes != null) { secondaryAndTertiaryMap.put(regionInfo, favoredNodes); LOG.debug("Place the secondary and tertiary region server for region " + regionInfo.getRegionNameAsString()); } } catch (Exception e) { LOG.warn("Cannot place the favored nodes for region " + regionInfo.getRegionNameAsString() + " because " + e, e); continue; } } return secondaryAndTertiaryMap; } FavoredNodeAssignmentHelper(final List<ServerName> servers, Configuration conf); FavoredNodeAssignmentHelper(final List<ServerName> servers,
final RackManager rackManager); static void updateMetaWithFavoredNodesInfo(
Map<HRegionInfo, List<ServerName>> regionToFavoredNodes,
Connection connection); static void updateMetaWithFavoredNodesInfo(
Map<HRegionInfo, List<ServerName>> regionToFavoredNodes,
Configuration conf); static ServerName[] getFavoredNodesList(byte[] favoredNodes); static byte[] getFavoredNodes(List<ServerName> serverAddrList); Map<HRegionInfo, ServerName[]> placeSecondaryAndTertiaryWithRestrictions(
Map<HRegionInfo, ServerName> primaryRSMap); void initialize(); static String getFavoredNodesAsString(List<ServerName> nodes); static final byte [] FAVOREDNODES_QUALIFIER; final static short FAVORED_NODES_NUM; } | @Ignore("Disabled for now until FavoredNodes gets finished as a feature") @Test public void testSecondaryAndTertiaryPlacementWithSingleRack() { Map<String,Integer> rackToServerCount = new HashMap<String,Integer>(); rackToServerCount.put("rack1", 10); Triple<Map<HRegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<HRegionInfo>> primaryRSMapAndHelper = secondaryAndTertiaryRSPlacementHelper(60000, rackToServerCount); FavoredNodeAssignmentHelper helper = primaryRSMapAndHelper.getSecond(); Map<HRegionInfo, ServerName> primaryRSMap = primaryRSMapAndHelper.getFirst(); List<HRegionInfo> regions = primaryRSMapAndHelper.getThird(); Map<HRegionInfo, ServerName[]> secondaryAndTertiaryMap = helper.placeSecondaryAndTertiaryRS(primaryRSMap); for (HRegionInfo region : regions) { ServerName[] secondaryAndTertiaryServers = secondaryAndTertiaryMap.get(region); assertTrue(!secondaryAndTertiaryServers[0].equals(primaryRSMap.get(region))); assertTrue(!secondaryAndTertiaryServers[1].equals(primaryRSMap.get(region))); assertTrue(!secondaryAndTertiaryServers[0].equals(secondaryAndTertiaryServers[1])); } }
@Ignore("Disabled for now until FavoredNodes gets finished as a feature") @Test public void testSecondaryAndTertiaryPlacementWithSingleServer() { Map<String,Integer> rackToServerCount = new HashMap<String,Integer>(); rackToServerCount.put("rack1", 1); Triple<Map<HRegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<HRegionInfo>> primaryRSMapAndHelper = secondaryAndTertiaryRSPlacementHelper(1, rackToServerCount); FavoredNodeAssignmentHelper helper = primaryRSMapAndHelper.getSecond(); Map<HRegionInfo, ServerName> primaryRSMap = primaryRSMapAndHelper.getFirst(); List<HRegionInfo> regions = primaryRSMapAndHelper.getThird(); Map<HRegionInfo, ServerName[]> secondaryAndTertiaryMap = helper.placeSecondaryAndTertiaryRS(primaryRSMap); assertTrue(secondaryAndTertiaryMap.get(regions.get(0)) == null); }
@Ignore("Disabled for now until FavoredNodes gets finished as a feature") @Test public void testSecondaryAndTertiaryPlacementWithMultipleRacks() { Map<String,Integer> rackToServerCount = new HashMap<String,Integer>(); rackToServerCount.put("rack1", 10); rackToServerCount.put("rack2", 10); Triple<Map<HRegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<HRegionInfo>> primaryRSMapAndHelper = secondaryAndTertiaryRSPlacementHelper(60000, rackToServerCount); FavoredNodeAssignmentHelper helper = primaryRSMapAndHelper.getSecond(); Map<HRegionInfo, ServerName> primaryRSMap = primaryRSMapAndHelper.getFirst(); assertTrue(primaryRSMap.size() == 60000); Map<HRegionInfo, ServerName[]> secondaryAndTertiaryMap = helper.placeSecondaryAndTertiaryRS(primaryRSMap); assertTrue(secondaryAndTertiaryMap.size() == 60000); for (Map.Entry<HRegionInfo, ServerName[]> entry : secondaryAndTertiaryMap.entrySet()) { ServerName[] allServersForRegion = entry.getValue(); String primaryRSRack = rackManager.getRack(primaryRSMap.get(entry.getKey())); String secondaryRSRack = rackManager.getRack(allServersForRegion[0]); String tertiaryRSRack = rackManager.getRack(allServersForRegion[1]); assertTrue(!primaryRSRack.equals(secondaryRSRack)); assertTrue(secondaryRSRack.equals(tertiaryRSRack)); } }
@Ignore("Disabled for now until FavoredNodes gets finished as a feature") @Test public void testSecondaryAndTertiaryPlacementWithLessThanTwoServersInRacks() { Map<String,Integer> rackToServerCount = new HashMap<String,Integer>(); rackToServerCount.put("rack1", 1); rackToServerCount.put("rack2", 1); Triple<Map<HRegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<HRegionInfo>> primaryRSMapAndHelper = secondaryAndTertiaryRSPlacementHelper(6, rackToServerCount); FavoredNodeAssignmentHelper helper = primaryRSMapAndHelper.getSecond(); Map<HRegionInfo, ServerName> primaryRSMap = primaryRSMapAndHelper.getFirst(); List<HRegionInfo> regions = primaryRSMapAndHelper.getThird(); assertTrue(primaryRSMap.size() == 6); Map<HRegionInfo, ServerName[]> secondaryAndTertiaryMap = helper.placeSecondaryAndTertiaryRS(primaryRSMap); for (HRegionInfo region : regions) { assertTrue(secondaryAndTertiaryMap.get(region) == null); } }
@Ignore("Disabled for now until FavoredNodes gets finished as a feature") @Test public void testSecondaryAndTertiaryPlacementWithMoreThanOneServerInPrimaryRack() { Map<String,Integer> rackToServerCount = new HashMap<String,Integer>(); rackToServerCount.put("rack1", 2); rackToServerCount.put("rack2", 1); Triple<Map<HRegionInfo, ServerName>, FavoredNodeAssignmentHelper, List<HRegionInfo>> primaryRSMapAndHelper = secondaryAndTertiaryRSPlacementHelper(6, rackToServerCount); FavoredNodeAssignmentHelper helper = primaryRSMapAndHelper.getSecond(); Map<HRegionInfo, ServerName> primaryRSMap = primaryRSMapAndHelper.getFirst(); List<HRegionInfo> regions = primaryRSMapAndHelper.getThird(); assertTrue(primaryRSMap.size() == 6); Map<HRegionInfo, ServerName[]> secondaryAndTertiaryMap = helper.placeSecondaryAndTertiaryRS(primaryRSMap); for (HRegionInfo region : regions) { ServerName s = primaryRSMap.get(region); ServerName secondaryRS = secondaryAndTertiaryMap.get(region)[0]; ServerName tertiaryRS = secondaryAndTertiaryMap.get(region)[1]; if (rackManager.getRack(s).equals("rack1")) { assertTrue(rackManager.getRack(secondaryRS).equals("rack2") && rackManager.getRack(tertiaryRS).equals("rack1")); } if (rackManager.getRack(s).equals("rack2")) { assertTrue(rackManager.getRack(secondaryRS).equals("rack1") && rackManager.getRack(tertiaryRS).equals("rack1")); } } } |
FSTableDescriptors implements TableDescriptors { @Override public Map<String, HTableDescriptor> getAll() throws IOException { Map<String, HTableDescriptor> htds = new TreeMap<String, HTableDescriptor>(); if (fsvisited && usecache) { for (Map.Entry<TableName, HTableDescriptor> entry: this.cache.entrySet()) { htds.put(entry.getKey().toString(), entry.getValue()); } htds.put(HTableDescriptor.META_TABLEDESC.getTableName().getNameAsString(), HTableDescriptor.META_TABLEDESC); } else { LOG.debug("Fetching table descriptors from the filesystem."); boolean allvisited = true; for (Path d : FSUtils.getTableDirs(fs, rootdir)) { HTableDescriptor htd = null; try { htd = get(FSUtils.getTableName(d)); } catch (FileNotFoundException fnfe) { LOG.warn("Trouble retrieving htd", fnfe); } if (htd == null) { allvisited = false; continue; } else { htds.put(htd.getTableName().getNameAsString(), htd); } fsvisited = allvisited; } } return htds; } FSTableDescriptors(final Configuration conf); FSTableDescriptors(final Configuration conf, final FileSystem fs, final Path rootdir); FSTableDescriptors(final Configuration conf, final FileSystem fs,
final Path rootdir, final boolean fsreadonly, final boolean usecache); void setCacheOn(); void setCacheOff(); @VisibleForTesting boolean isUsecache(); @Override HTableDescriptor get(final TableName tablename); @Override Map<String, HTableDescriptor> getAll(); @Override Map<String, HTableDescriptor> getByNamespace(String name); @Override void add(HTableDescriptor htd); @Override HTableDescriptor remove(final TableName tablename); boolean isTableInfoExists(TableName tableName); static FileStatus getTableInfoPath(FileSystem fs, Path tableDir); static HTableDescriptor getTableDescriptorFromFs(FileSystem fs,
Path hbaseRootDir, TableName tableName); static HTableDescriptor getTableDescriptorFromFs(FileSystem fs,
Path hbaseRootDir, TableName tableName, boolean rewritePb); static HTableDescriptor getTableDescriptorFromFs(FileSystem fs, Path tableDir); static HTableDescriptor getTableDescriptorFromFs(FileSystem fs, Path tableDir,
boolean rewritePb); void deleteTableDescriptorIfExists(TableName tableName); boolean createTableDescriptor(HTableDescriptor htd); boolean createTableDescriptor(HTableDescriptor htd, boolean forceCreation); boolean createTableDescriptorForTableDirectory(Path tableDir,
HTableDescriptor htd, boolean forceCreation); } | @Test public void testGetAll() throws IOException, InterruptedException { final String name = "testGetAll"; FileSystem fs = FileSystem.get(UTIL.getConfiguration()); Path rootdir = new Path(UTIL.getDataTestDir(), name); FSTableDescriptors htds = new FSTableDescriptorsTest(fs, rootdir); final int count = 4; for (int i = 0; i < count; i++) { HTableDescriptor htd = new HTableDescriptor(name + i); htds.createTableDescriptor(htd); } HTableDescriptor htd = new HTableDescriptor(HTableDescriptor.META_TABLEDESC.getTableName()); htds.createTableDescriptor(htd); assertTrue(htds.getAll().size() == count + 1); } |
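Outside of the test's FSTableDescriptorsTest subclass, the class is normally built from a Configuration, a FileSystem and the HBase root directory, using the three-argument constructor listed in the signature above. A hedged usage sketch follows; the rootdir default and the main-class name are placeholders, and the call assumes the filesystem already holds table layouts under that root:

```java
import java.io.IOException;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.util.FSTableDescriptors;

public class ListTableDescriptors {
  public static void main(String[] args) throws IOException {
    Configuration conf = HBaseConfiguration.create();
    FileSystem fs = FileSystem.get(conf);
    // "/hbase" is only a placeholder; point this at the cluster's hbase.rootdir.
    Path rootdir = new Path(args.length > 0 ? args[0] : "/hbase");

    // Descriptor view backed by the filesystem; caching behaviour can be
    // toggled afterwards with setCacheOn()/setCacheOff().
    FSTableDescriptors htds = new FSTableDescriptors(conf, fs, rootdir);
    Map<String, HTableDescriptor> all = htds.getAll();
    for (Map.Entry<String, HTableDescriptor> e : all.entrySet()) {
      System.out.println(e.getKey() + " -> " + e.getValue());
    }
  }
}
```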
BaseLoadBalancer implements LoadBalancer { @Override public Map<HRegionInfo, ServerName> immediateAssignment(List<HRegionInfo> regions, List<ServerName> servers) { metricsBalancer.incrMiscInvocations(); if (servers == null || servers.isEmpty()) { LOG.warn("Wanted to do random assignment but no servers to assign to"); return null; } Map<HRegionInfo, ServerName> assignments = new TreeMap<HRegionInfo, ServerName>(); for (HRegionInfo region : regions) { assignments.put(region, randomAssignment(region, servers)); } return assignments; } static boolean tablesOnMaster(Configuration conf); @Override void setConf(Configuration conf); boolean shouldBeOnMaster(HRegionInfo region); @Override Configuration getConf(); @Override synchronized void setClusterStatus(ClusterStatus st); @Override void setMasterServices(MasterServices masterServices); void setRackManager(RackManager rackManager); @Override Map<ServerName, List<HRegionInfo>> roundRobinAssignment(List<HRegionInfo> regions,
List<ServerName> servers); @Override Map<HRegionInfo, ServerName> immediateAssignment(List<HRegionInfo> regions,
List<ServerName> servers); @Override ServerName randomAssignment(HRegionInfo regionInfo, List<ServerName> servers); @Override Map<ServerName, List<HRegionInfo>> retainAssignment(Map<HRegionInfo, ServerName> regions,
List<ServerName> servers); @Override void initialize(); @Override void regionOnline(HRegionInfo regionInfo, ServerName sn); @Override void regionOffline(HRegionInfo regionInfo); @Override boolean isStopped(); @Override void stop(String why); @Override void onConfigurationChange(Configuration conf); static final String TABLES_ON_MASTER; } | @Test (timeout=30000) public void testImmediateAssignment() throws Exception { for (int[] mock : regionsAndServersMocks) { LOG.debug("testImmediateAssignment with " + mock[0] + " regions and " + mock[1] + " servers"); List<HRegionInfo> regions = randomRegions(mock[0]); List<ServerAndLoad> servers = randomServers(mock[1], 0); List<ServerName> list = getListOfServerNames(servers); Map<HRegionInfo, ServerName> assignments = loadBalancer.immediateAssignment(regions, list); assertImmediateAssignment(regions, list, assignments); returnRegions(regions); returnServers(list); } } |
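immediateAssignment hands every region to randomAssignment without any balancing, which is why the test only verifies that each region ends up on some server from the list. A simplified, HBase-free analogue of that behaviour (strings instead of HRegionInfo/ServerName) is sketched below:

```java
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.TreeMap;

public class ImmediateAssignmentSketch {
  private static final Random RANDOM = new Random();

  /**
   * Simplified analogue of BaseLoadBalancer.immediateAssignment: every region
   * is placed on a randomly chosen server, with no balancing constraints.
   */
  static Map<String, String> immediateAssignment(List<String> regions, List<String> servers) {
    if (servers == null || servers.isEmpty()) {
      return null; // the real balancer logs a warning and returns null as well
    }
    Map<String, String> assignments = new TreeMap<>();
    for (String region : regions) {
      assignments.put(region, servers.get(RANDOM.nextInt(servers.size())));
    }
    return assignments;
  }
}
```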
BaseLoadBalancer implements LoadBalancer { @Override public Map<ServerName, List<HRegionInfo>> roundRobinAssignment(List<HRegionInfo> regions, List<ServerName> servers) { metricsBalancer.incrMiscInvocations(); Map<ServerName, List<HRegionInfo>> assignments = assignMasterRegions(regions, servers); if (assignments != null && !assignments.isEmpty()) { servers = new ArrayList<ServerName>(servers); servers.remove(masterServerName); List<HRegionInfo> masterRegions = assignments.get(masterServerName); if (!masterRegions.isEmpty()) { regions = new ArrayList<HRegionInfo>(regions); for (HRegionInfo region: masterRegions) { regions.remove(region); } } } if (regions == null || regions.isEmpty()) { return assignments; } int numServers = servers == null ? 0 : servers.size(); if (numServers == 0) { LOG.warn("Wanted to do round robin assignment but no servers to assign to"); return null; } if (numServers == 1) { ServerName server = servers.get(0); assignments.put(server, new ArrayList<HRegionInfo>(regions)); return assignments; } Cluster cluster = createCluster(servers, regions); List<HRegionInfo> unassignedRegions = new ArrayList<HRegionInfo>(); roundRobinAssignment(cluster, regions, unassignedRegions, servers, assignments); List<HRegionInfo> lastFewRegions = new ArrayList<HRegionInfo>(); int serverIdx = RANDOM.nextInt(numServers); for (HRegionInfo region : unassignedRegions) { boolean assigned = false; for (int j = 0; j < numServers; j++) { ServerName serverName = servers.get((j + serverIdx) % numServers); if (!cluster.wouldLowerAvailability(region, serverName)) { List<HRegionInfo> serverRegions = assignments.get(serverName); if (serverRegions == null) { serverRegions = new ArrayList<HRegionInfo>(); assignments.put(serverName, serverRegions); } serverRegions.add(region); cluster.doAssignRegion(region, serverName); serverIdx = (j + serverIdx + 1) % numServers; assigned = true; break; } } if (!assigned) { lastFewRegions.add(region); } } for (HRegionInfo region : lastFewRegions) { int i = RANDOM.nextInt(numServers); ServerName server = servers.get(i); List<HRegionInfo> serverRegions = assignments.get(server); if (serverRegions == null) { serverRegions = new ArrayList<HRegionInfo>(); assignments.put(server, serverRegions); } serverRegions.add(region); cluster.doAssignRegion(region, server); } return assignments; } static boolean tablesOnMaster(Configuration conf); @Override void setConf(Configuration conf); boolean shouldBeOnMaster(HRegionInfo region); @Override Configuration getConf(); @Override synchronized void setClusterStatus(ClusterStatus st); @Override void setMasterServices(MasterServices masterServices); void setRackManager(RackManager rackManager); @Override Map<ServerName, List<HRegionInfo>> roundRobinAssignment(List<HRegionInfo> regions,
List<ServerName> servers); @Override Map<HRegionInfo, ServerName> immediateAssignment(List<HRegionInfo> regions,
List<ServerName> servers); @Override ServerName randomAssignment(HRegionInfo regionInfo, List<ServerName> servers); @Override Map<ServerName, List<HRegionInfo>> retainAssignment(Map<HRegionInfo, ServerName> regions,
List<ServerName> servers); @Override void initialize(); @Override void regionOnline(HRegionInfo regionInfo, ServerName sn); @Override void regionOffline(HRegionInfo regionInfo); @Override boolean isStopped(); @Override void stop(String why); @Override void onConfigurationChange(Configuration conf); static final String TABLES_ON_MASTER; } | @Test (timeout=180000) public void testBulkAssignment() throws Exception { for (int[] mock : regionsAndServersMocks) { LOG.debug("testBulkAssignment with " + mock[0] + " regions and " + mock[1] + " servers"); List<HRegionInfo> regions = randomRegions(mock[0]); List<ServerAndLoad> servers = randomServers(mock[1], 0); List<ServerName> list = getListOfServerNames(servers); Map<ServerName, List<HRegionInfo>> assignments = loadBalancer.roundRobinAssignment(regions, list); float average = (float) regions.size() / servers.size(); int min = (int) Math.floor(average); int max = (int) Math.ceil(average); if (assignments != null && !assignments.isEmpty()) { for (List<HRegionInfo> regionList : assignments.values()) { assertTrue(regionList.size() == min || regionList.size() == max); } } returnRegions(regions); returnServers(list); } } |
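testBulkAssignment accepts any outcome in which every server holds either floor(regions/servers) or ceil(regions/servers) regions. That bound is independent of HBase and can be checked with a few lines of plain Java; the helper below is a sketch of the assertion, not part of the balancer API:

```java
import java.util.Collection;

public class BalanceBoundCheck {
  /**
   * Returns true when every server holds either floor(avg) or ceil(avg)
   * regions, the invariant asserted by testBulkAssignment after
   * roundRobinAssignment has run.
   */
  static boolean isEvenlySpread(Collection<Integer> regionsPerServer, int totalRegions,
      int totalServers) {
    float average = (float) totalRegions / totalServers;
    int min = (int) Math.floor(average);
    int max = (int) Math.ceil(average);
    for (int count : regionsPerServer) {
      if (count != min && count != max) {
        return false;
      }
    }
    return true;
  }
}
```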
BaseLoadBalancer implements LoadBalancer { @Override public Map<ServerName, List<HRegionInfo>> retainAssignment(Map<HRegionInfo, ServerName> regions, List<ServerName> servers) { metricsBalancer.incrMiscInvocations(); Map<ServerName, List<HRegionInfo>> assignments = assignMasterRegions(regions.keySet(), servers); if (assignments != null && !assignments.isEmpty()) { servers = new ArrayList<ServerName>(servers); servers.remove(masterServerName); List<HRegionInfo> masterRegions = assignments.get(masterServerName); if (!masterRegions.isEmpty()) { regions = new HashMap<HRegionInfo, ServerName>(regions); for (HRegionInfo region: masterRegions) { regions.remove(region); } } } if (regions == null || regions.isEmpty()) { return assignments; } int numServers = servers == null ? 0 : servers.size(); if (numServers == 0) { LOG.warn("Wanted to do retain assignment but no servers to assign to"); return null; } if (numServers == 1) { ServerName server = servers.get(0); assignments.put(server, new ArrayList<HRegionInfo>(regions.keySet())); return assignments; } ArrayListMultimap<String, ServerName> serversByHostname = ArrayListMultimap.create(); for (ServerName server : servers) { assignments.put(server, new ArrayList<HRegionInfo>()); serversByHostname.put(server.getHostname(), server); } Set<String> oldHostsNoLongerPresent = Sets.newTreeSet(); int numRandomAssignments = 0; int numRetainedAssigments = 0; Cluster cluster = createCluster(servers, regions.keySet()); for (Map.Entry<HRegionInfo, ServerName> entry : regions.entrySet()) { HRegionInfo region = entry.getKey(); ServerName oldServerName = entry.getValue(); List<ServerName> localServers = new ArrayList<ServerName>(); if (oldServerName != null) { localServers = serversByHostname.get(oldServerName.getHostname()); } if (localServers.isEmpty()) { ServerName randomServer = randomAssignment(cluster, region, servers); assignments.get(randomServer).add(region); numRandomAssignments++; if (oldServerName != null) oldHostsNoLongerPresent.add(oldServerName.getHostname()); } else if (localServers.size() == 1) { ServerName target = localServers.get(0); assignments.get(target).add(region); cluster.doAssignRegion(region, target); numRetainedAssigments++; } else { if (localServers.contains(oldServerName)) { assignments.get(oldServerName).add(region); cluster.doAssignRegion(region, oldServerName); } else { ServerName target = null; for (ServerName tmp: localServers) { if (tmp.getPort() == oldServerName.getPort()) { target = tmp; break; } } if (target == null) { target = randomAssignment(cluster, region, localServers); } assignments.get(target).add(region); } numRetainedAssigments++; } } String randomAssignMsg = ""; if (numRandomAssignments > 0) { randomAssignMsg = numRandomAssignments + " regions were assigned " + "to random hosts, since the old hosts for these regions are no " + "longer present in the cluster. These hosts were:\n " + Joiner.on("\n ").join(oldHostsNoLongerPresent); } LOG.info("Reassigned " + regions.size() + " regions. " + numRetainedAssigments + " retained the pre-restart assignment. " + randomAssignMsg); return assignments; }
static boolean tablesOnMaster(Configuration conf); @Override void setConf(Configuration conf); boolean shouldBeOnMaster(HRegionInfo region); @Override Configuration getConf(); @Override synchronized void setClusterStatus(ClusterStatus st); @Override void setMasterServices(MasterServices masterServices); void setRackManager(RackManager rackManager); @Override Map<ServerName, List<HRegionInfo>> roundRobinAssignment(List<HRegionInfo> regions,
List<ServerName> servers); @Override Map<HRegionInfo, ServerName> immediateAssignment(List<HRegionInfo> regions,
List<ServerName> servers); @Override ServerName randomAssignment(HRegionInfo regionInfo, List<ServerName> servers); @Override Map<ServerName, List<HRegionInfo>> retainAssignment(Map<HRegionInfo, ServerName> regions,
List<ServerName> servers); @Override void initialize(); @Override void regionOnline(HRegionInfo regionInfo, ServerName sn); @Override void regionOffline(HRegionInfo regionInfo); @Override boolean isStopped(); @Override void stop(String why); @Override void onConfigurationChange(Configuration conf); static final String TABLES_ON_MASTER; } | @Test (timeout=180000) public void testRetainAssignment() throws Exception { List<ServerAndLoad> servers = randomServers(10, 10); List<HRegionInfo> regions = randomRegions(100); Map<HRegionInfo, ServerName> existing = new TreeMap<HRegionInfo, ServerName>(); for (int i = 0; i < regions.size(); i++) { ServerName sn = servers.get(i % servers.size()).getServerName(); ServerName snWithOldStartCode = ServerName.valueOf(sn.getHostname(), sn.getPort(), sn.getStartcode() - 10); existing.put(regions.get(i), snWithOldStartCode); } List<ServerName> listOfServerNames = getListOfServerNames(servers); Map<ServerName, List<HRegionInfo>> assignment = loadBalancer.retainAssignment(existing, listOfServerNames); assertRetainedAssignment(existing, listOfServerNames, assignment); List<ServerAndLoad> servers2 = new ArrayList<ServerAndLoad>(servers); servers2.add(randomServer(10)); servers2.add(randomServer(10)); listOfServerNames = getListOfServerNames(servers2); assignment = loadBalancer.retainAssignment(existing, listOfServerNames); assertRetainedAssignment(existing, listOfServerNames, assignment); List<ServerAndLoad> servers3 = new ArrayList<ServerAndLoad>(servers); servers3.remove(0); servers3.remove(0); listOfServerNames = getListOfServerNames(servers3); assignment = loadBalancer.retainAssignment(existing, listOfServerNames); assertRetainedAssignment(existing, listOfServerNames, assignment); } |
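The retention rule the test exercises is: keep a region on its old hostname whenever that host is still alive, even if the server came back with a new start code (preferring the same port), and fall back to a random live server only when the host has disappeared. A simplified sketch of that rule, with hostnames in place of ServerName and none of the Cluster availability tracking:

```java
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Random;

public class RetainAssignmentSketch {
  private static final Random RANDOM = new Random();

  /**
   * Maps each region to a live host, keeping the old host when it is still in
   * the cluster. The real retainAssignment additionally matches on port and
   * start code and routes random picks through the Cluster model.
   */
  static Map<String, String> retainAssignment(Map<String, String> oldAssignment,
      List<String> liveHosts) {
    if (liveHosts == null || liveHosts.isEmpty()) {
      return null; // the real balancer warns and returns null as well
    }
    Map<String, String> result = new HashMap<>();
    for (Map.Entry<String, String> e : oldAssignment.entrySet()) {
      String region = e.getKey();
      String oldHost = e.getValue();
      if (oldHost != null && liveHosts.contains(oldHost)) {
        result.put(region, oldHost); // retained the pre-restart assignment
      } else {
        // old host no longer present: assign to a random live host
        result.put(region, liveHosts.get(RANDOM.nextInt(liveHosts.size())));
      }
    }
    return result;
  }
}
```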
RegionLocationFinder { protected HDFSBlocksDistribution internalGetTopBlockLocation(HRegionInfo region) { try { HTableDescriptor tableDescriptor = getTableDescriptor(region.getTable()); if (tableDescriptor != null) { HDFSBlocksDistribution blocksDistribution = HRegion.computeHDFSBlocksDistribution(getConf(), tableDescriptor, region); return blocksDistribution; } } catch (IOException ioe) { LOG.warn("IOException during HDFSBlocksDistribution computation. for " + "region = " + region.getEncodedName(), ioe); } return new HDFSBlocksDistribution(); } RegionLocationFinder(); Configuration getConf(); void setConf(Configuration conf); void setServices(MasterServices services); void setClusterStatus(ClusterStatus status); HDFSBlocksDistribution getBlockDistribution(HRegionInfo hri); } | @Test public void testInternalGetTopBlockLocation() throws Exception { for (int i = 0; i < ServerNum; i++) { HRegionServer server = cluster.getRegionServer(i); for (Region region : server.getOnlineRegions(tableName)) { HDFSBlocksDistribution blocksDistribution1 = region.getHDFSBlocksDistribution(); HDFSBlocksDistribution blocksDistribution2 = finder.getBlockDistribution(region .getRegionInfo()); assertEquals(blocksDistribution1.getUniqueBlocksTotalWeight(), blocksDistribution2.getUniqueBlocksTotalWeight()); if (blocksDistribution1.getUniqueBlocksTotalWeight() != 0) { assertEquals(blocksDistribution1.getTopHosts().get(0), blocksDistribution2.getTopHosts() .get(0)); } } } } |
RegionLocationFinder { protected List<ServerName> mapHostNameToServerName(List<String> hosts) { if (hosts == null || status == null) { if (hosts == null) { LOG.warn("RegionLocationFinder top hosts is null"); } return Lists.newArrayList(); } List<ServerName> topServerNames = new ArrayList<ServerName>(); Collection<ServerName> regionServers = status.getServers(); HashMap<String, List<ServerName>> hostToServerName = new HashMap<String, List<ServerName>>(); for (ServerName sn : regionServers) { String host = sn.getHostname(); if (!hostToServerName.containsKey(host)) { hostToServerName.put(host, new ArrayList<ServerName>()); } hostToServerName.get(host).add(sn); } for (String host : hosts) { if (!hostToServerName.containsKey(host)) { continue; } for (ServerName sn : hostToServerName.get(host)) { if (sn != null) { topServerNames.add(sn); } } } return topServerNames; } RegionLocationFinder(); Configuration getConf(); void setConf(Configuration conf); void setServices(MasterServices services); void setClusterStatus(ClusterStatus status); HDFSBlocksDistribution getBlockDistribution(HRegionInfo hri); } | @Test public void testMapHostNameToServerName() throws Exception { List<String> topHosts = new ArrayList<String>(); for (int i = 0; i < ServerNum; i++) { HRegionServer server = cluster.getRegionServer(i); String serverHost = server.getServerName().getHostname(); if (!topHosts.contains(serverHost)) { topHosts.add(serverHost); } } List<ServerName> servers = finder.mapHostNameToServerName(topHosts); assertEquals(1, topHosts.size()); for (int i = 0; i < ServerNum; i++) { ServerName server = cluster.getRegionServer(i).getServerName(); assertTrue(servers.contains(server)); } } |
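mapHostNameToServerName groups the live servers by hostname and then expands the block-location host list into concrete server instances, which is what lets the test assert that every region server on the single test host is returned. A string-based sketch of the same grouping, assuming a hypothetical "host,port,startcode" textual layout for the stand-in server names (the real method reads the live set from ClusterStatus):

```java
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class HostToServerSketch {
  /**
   * Expands a list of favored hostnames into the server instances currently
   * known to the cluster, preserving the host order. Unknown hosts are
   * silently skipped, as in mapHostNameToServerName.
   */
  static List<String> mapHostsToServers(List<String> topHosts, List<String> liveServers) {
    Map<String, List<String>> byHost = new HashMap<>();
    for (String server : liveServers) {
      String host = server.split(",")[0]; // assumed "host,port,startcode" layout
      byHost.computeIfAbsent(host, h -> new ArrayList<>()).add(server);
    }
    List<String> result = new ArrayList<>();
    for (String host : topHosts) {
      result.addAll(byHost.getOrDefault(host, Collections.<String>emptyList()));
    }
    return result;
  }
}
```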
RegionLocationFinder { protected List<ServerName> getTopBlockLocations(HRegionInfo region) { HDFSBlocksDistribution blocksDistribution = getBlockDistribution(region); List<String> topHosts = blocksDistribution.getTopHosts(); return mapHostNameToServerName(topHosts); } RegionLocationFinder(); Configuration getConf(); void setConf(Configuration conf); void setServices(MasterServices services); void setClusterStatus(ClusterStatus status); HDFSBlocksDistribution getBlockDistribution(HRegionInfo hri); } | @Test public void testGetTopBlockLocations() throws Exception { for (int i = 0; i < ServerNum; i++) { HRegionServer server = cluster.getRegionServer(i); for (Region region : server.getOnlineRegions(tableName)) { List<ServerName> servers = finder.getTopBlockLocations(region.getRegionInfo()); if (region.getHDFSBlocksDistribution().getUniqueBlocksTotalWeight() == 0) { continue; } List<String> topHosts = region.getHDFSBlocksDistribution().getTopHosts(); if (!topHosts.contains(server.getServerName().getHostname())) { continue; } for (int j = 0; j < ServerNum; j++) { ServerName serverName = cluster.getRegionServer(j).getServerName(); assertTrue(servers.contains(serverName)); } } } } |