| repo (stringlengths 2-99) | file (stringlengths 13-225) | code (stringlengths 0-18.3M) | file_length (int64, 0-18.3M) | avg_line_length (float64, 0-1.36M) | max_line_length (int64, 0-4.26M) | extension_type (stringclasses 1) |
---|---|---|---|---|---|---|
flink
|
flink-master/flink-python/pyflink/fn_execution/table/aggregate_slow.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from abc import ABC, abstractmethod
from typing import List, Dict, Iterable
from pyflink.common import Row, RowKind
from pyflink.fn_execution.coders import PickleCoder
from pyflink.fn_execution.table.state_data_view import DataViewSpec, ListViewSpec, MapViewSpec, \
PerKeyStateDataViewStore
from pyflink.table import AggregateFunction, FunctionContext, TableAggregateFunction
from pyflink.table.udf import ImperativeAggregateFunction
def join_row(left: List, right: List):
return Row(*(left + right))
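# Illustrative example (not part of the original module): join_row simply
# concatenates the key fields and the aggregate result fields into one Row.
#   join_row([1, 'a'], [3.0])  # -> Row(1, 'a', 3.0)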
class DistinctViewDescriptor(object):
def __init__(self, input_extractor, filter_args):
self._input_extractor = input_extractor
self._filter_args = filter_args
def get_input_extractor(self):
return self._input_extractor
def get_filter_args(self):
return self._filter_args
class RowKeySelector(object):
"""
A simple key selector used to extract the current key from the input List according to the
group-by field indexes.
"""
def __init__(self, grouping):
self.grouping = grouping
def get_key(self, data):
return [data[i] for i in self.grouping]
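# Illustrative example (not part of the original module): with group-by field
# indexes [0, 2], the selector picks those positions from a flattened input row.
#   selector = RowKeySelector([0, 2])
#   selector.get_key(['user_1', 42, 'CN'])  # -> ['user_1', 'CN']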
class AggsHandleFunctionBase(ABC):
"""
The base class for handling aggregate or table aggregate functions.
"""
@abstractmethod
def open(self, state_data_view_store):
"""
Initialization method for the function. It is called before the actual working methods.
:param state_data_view_store: The object used to manage the DataView.
"""
pass
@abstractmethod
def accumulate(self, input_data: Row):
"""
Accumulates the input values to the accumulators.
:param input_data: Input values bundled in a Row.
"""
pass
@abstractmethod
def retract(self, input_data: Row):
"""
Retracts the input values from the accumulators.
:param input_data: Input values bundled in a Row.
"""
@abstractmethod
def merge(self, accumulators: List):
"""
Merges the other accumulators into current accumulators.
:param accumulators: The other List of accumulators.
"""
pass
@abstractmethod
def set_accumulators(self, accumulators: List):
"""
        Sets the current accumulators (saved in a List) which contain the current aggregated
        results.
        In streaming, accumulators are stored in state, so we need to restore aggregate buffers
        from the state.
        In batch, accumulators are stored in a dict, so we need to restore aggregate buffers
        from the dict.
:param accumulators: Current accumulators.
"""
pass
@abstractmethod
def get_accumulators(self) -> List:
"""
Gets the current accumulators (saved in a list) which contains the current
aggregated results.
:return: The current accumulators.
"""
pass
@abstractmethod
def create_accumulators(self) -> List:
"""
        Initializes the accumulators and saves them to an accumulators List.
:return: A List of accumulators which contains the aggregated results.
"""
pass
@abstractmethod
def cleanup(self):
"""
Cleanup for the retired accumulators state.
"""
pass
@abstractmethod
def close(self):
"""
Tear-down method for this function. It can be used for clean up work.
By default, this method does nothing.
"""
pass
class AggsHandleFunction(AggsHandleFunctionBase):
"""
The base class for handling aggregate functions.
"""
@abstractmethod
def get_value(self) -> List:
"""
Gets the result of the aggregation from the current accumulators.
:return: The final result (saved in a row) of the current accumulators.
"""
pass
class TableAggsHandleFunction(AggsHandleFunctionBase):
"""
The base class for handling table aggregate functions.
"""
@abstractmethod
def emit_value(self, current_key: List, is_retract: bool) -> Iterable[Row]:
"""
Emit the result of the table aggregation.
"""
pass
class SimpleAggsHandleFunctionBase(AggsHandleFunctionBase):
"""
A simple AggsHandleFunctionBase implementation which provides the basic functionality.
"""
def __init__(self,
udfs: List[ImperativeAggregateFunction],
input_extractors: List,
udf_data_view_specs: List[List[DataViewSpec]],
filter_args: List[int],
distinct_indexes: List[int],
distinct_view_descriptors: Dict[int, DistinctViewDescriptor]):
self._udfs = udfs
self._input_extractors = input_extractors
self._accumulators = None # type: List
self._udf_data_view_specs = udf_data_view_specs
self._udf_data_views = []
self._filter_args = filter_args
self._distinct_indexes = distinct_indexes
self._distinct_view_descriptors = distinct_view_descriptors
self._distinct_data_views = {}
def open(self, state_data_view_store):
for udf in self._udfs:
udf.open(state_data_view_store.get_runtime_context())
self._udf_data_views = []
for data_view_specs in self._udf_data_view_specs:
data_views = {}
for data_view_spec in data_view_specs:
if isinstance(data_view_spec, ListViewSpec):
data_views[data_view_spec.field_index] = \
state_data_view_store.get_state_list_view(
data_view_spec.state_id,
data_view_spec.element_coder)
elif isinstance(data_view_spec, MapViewSpec):
data_views[data_view_spec.field_index] = \
state_data_view_store.get_state_map_view(
data_view_spec.state_id,
data_view_spec.key_coder,
data_view_spec.value_coder)
self._udf_data_views.append(data_views)
for key in self._distinct_view_descriptors.keys():
self._distinct_data_views[key] = state_data_view_store.get_state_map_view(
"agg%ddistinct" % key,
PickleCoder(),
PickleCoder())
def accumulate(self, input_data: Row):
for i in range(len(self._udfs)):
if i in self._distinct_data_views:
if len(self._distinct_view_descriptors[i].get_filter_args()) == 0:
filtered = False
else:
filtered = True
for filter_arg in self._distinct_view_descriptors[i].get_filter_args():
if input_data[filter_arg]:
filtered = False
break
if not filtered:
input_extractor = self._distinct_view_descriptors[i].get_input_extractor()
args = input_extractor(input_data)
if args in self._distinct_data_views[i]:
self._distinct_data_views[i][args] += 1
else:
self._distinct_data_views[i][args] = 1
if self._filter_args[i] >= 0 and not input_data[self._filter_args[i]]:
continue
input_extractor = self._input_extractors[i]
args = input_extractor(input_data)
if self._distinct_indexes[i] >= 0:
if args in self._distinct_data_views[self._distinct_indexes[i]]:
if self._distinct_data_views[self._distinct_indexes[i]][args] > 1:
continue
else:
raise Exception(
"The args are not in the distinct data view, this should not happen.")
self._udfs[i].accumulate(self._accumulators[i], *args)
def retract(self, input_data: Row):
for i in range(len(self._udfs)):
if i in self._distinct_data_views:
if len(self._distinct_view_descriptors[i].get_filter_args()) == 0:
filtered = False
else:
filtered = True
for filter_arg in self._distinct_view_descriptors[i].get_filter_args():
if input_data[filter_arg]:
filtered = False
break
if not filtered:
input_extractor = self._distinct_view_descriptors[i].get_input_extractor()
args = input_extractor(input_data)
if args in self._distinct_data_views[i]:
self._distinct_data_views[i][args] -= 1
if self._distinct_data_views[i][args] == 0:
del self._distinct_data_views[i][args]
if self._filter_args[i] >= 0 and not input_data[self._filter_args[i]]:
continue
input_extractor = self._input_extractors[i]
args = input_extractor(input_data)
if self._distinct_indexes[i] >= 0 and \
args in self._distinct_data_views[self._distinct_indexes[i]]:
continue
self._udfs[i].retract(self._accumulators[i], *args)
def merge(self, accumulators: List):
for i in range(len(self._udfs)):
self._udfs[i].merge(self._accumulators[i], [accumulators[i]])
def set_accumulators(self, accumulators: List):
if self._udf_data_views:
for i in range(len(self._udf_data_views)):
for index, data_view in self._udf_data_views[i].items():
accumulators[i][index] = data_view
self._accumulators = accumulators
def get_accumulators(self):
return self._accumulators
def create_accumulators(self):
return [udf.create_accumulator() for udf in self._udfs]
def cleanup(self):
for i in range(len(self._udf_data_views)):
for data_view in self._udf_data_views[i].values():
data_view.clear()
def close(self):
for udf in self._udfs:
udf.close()
class SimpleAggsHandleFunction(SimpleAggsHandleFunctionBase, AggsHandleFunction):
"""
A simple AggsHandleFunction implementation which provides the basic functionality.
"""
def __init__(self,
udfs: List[AggregateFunction],
input_extractors: List,
index_of_count_star: int,
count_star_inserted: bool,
udf_data_view_specs: List[List[DataViewSpec]],
filter_args: List[int],
distinct_indexes: List[int],
distinct_view_descriptors: Dict[int, DistinctViewDescriptor]):
super(SimpleAggsHandleFunction, self).__init__(
udfs, input_extractors, udf_data_view_specs, filter_args, distinct_indexes,
distinct_view_descriptors)
self._get_value_indexes = [i for i in range(len(udfs))]
if index_of_count_star >= 0 and count_star_inserted:
# The record count is used internally, should be ignored by the get_value method.
self._get_value_indexes.remove(index_of_count_star)
def get_value(self):
return [self._udfs[i].get_value(self._accumulators[i]) for i in self._get_value_indexes]
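# Illustrative example (not part of the original module): when the planner
# inserted an internal COUNT(*) aggregate (count_star_inserted=True) at
# index_of_count_star=2 among three UDFs, _get_value_indexes becomes [0, 1],
# so get_value only emits the user-visible aggregate results and skips the
# internal record counter.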
class SimpleTableAggsHandleFunction(SimpleAggsHandleFunctionBase, TableAggsHandleFunction):
"""
A simple TableAggsHandleFunction implementation which provides the basic functionality.
"""
def __init__(self,
udfs: List[TableAggregateFunction],
input_extractors: List,
udf_data_view_specs: List[List[DataViewSpec]],
filter_args: List[int],
distinct_indexes: List[int],
distinct_view_descriptors: Dict[int, DistinctViewDescriptor]):
super(SimpleTableAggsHandleFunction, self).__init__(
udfs, input_extractors, udf_data_view_specs, filter_args, distinct_indexes,
distinct_view_descriptors)
def emit_value(self, current_key: List, is_retract: bool):
udf = self._udfs[0] # type: TableAggregateFunction
results = udf.emit_value(self._accumulators[0])
for x in results:
result = join_row(current_key, self._convert_to_row(x))
if is_retract:
result.set_row_kind(RowKind.DELETE)
else:
result.set_row_kind(RowKind.INSERT)
yield result
def _convert_to_row(self, data):
if isinstance(data, Row):
return data._values
elif isinstance(data, tuple):
return list(data)
else:
return [data]
class RecordCounter(ABC):
"""
The RecordCounter is used to count the number of input records under the current key.
"""
@abstractmethod
def record_count_is_zero(self, acc):
pass
@staticmethod
def of(index_of_count_star):
if index_of_count_star >= 0:
return RetractionRecordCounter(index_of_count_star)
else:
return AccumulationRecordCounter()
class AccumulationRecordCounter(RecordCounter):
def record_count_is_zero(self, acc):
# when all the inputs are accumulations, the count will never be zero
return acc is None
class RetractionRecordCounter(RecordCounter):
def __init__(self, index_of_count_star):
self._index_of_count_star = index_of_count_star
def record_count_is_zero(self, acc: List):
        # We store the counter in the accumulator and the counter is never null
return acc is None or acc[self._index_of_count_star][0] == 0
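# Illustrative example (not part of the original module):
#   RecordCounter.of(-1)  # -> AccumulationRecordCounter (no retractions upstream)
#   RecordCounter.of(0)   # -> RetractionRecordCounter tracking acc[0][0]
# AccumulationRecordCounter reports zero only when no accumulator exists yet;
# RetractionRecordCounter also reports zero once the stored COUNT(*) accumulator
# drops back to 0.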
class GroupAggFunctionBase(object):
def __init__(self,
aggs_handle: AggsHandleFunctionBase,
key_selector: RowKeySelector,
state_backend,
state_value_coder,
generate_update_before: bool,
state_cleaning_enabled: bool,
index_of_count_star: int):
self.aggs_handle = aggs_handle
self.generate_update_before = generate_update_before
self.state_cleaning_enabled = state_cleaning_enabled
self.key_selector = key_selector
self.state_value_coder = state_value_coder
self.state_backend = state_backend
self.record_counter = RecordCounter.of(index_of_count_star)
self.buffer = {}
def open(self, function_context: FunctionContext):
self.aggs_handle.open(PerKeyStateDataViewStore(function_context, self.state_backend))
def close(self):
self.aggs_handle.close()
def process_element(self, input_data: Row):
input_value = input_data._values
key = self.key_selector.get_key(input_value)
try:
self.buffer[tuple(key)].append(input_data)
except KeyError:
self.buffer[tuple(key)] = [input_data]
def on_timer(self, key: Row):
if self.state_cleaning_enabled:
self.state_backend.set_current_key(list(key._values))
accumulator_state = self.state_backend.get_value_state(
"accumulators", self.state_value_coder)
accumulator_state.clear()
self.aggs_handle.cleanup()
@abstractmethod
def finish_bundle(self):
pass
class GroupAggFunction(GroupAggFunctionBase):
def __init__(self,
aggs_handle: AggsHandleFunction,
key_selector: RowKeySelector,
state_backend,
state_value_coder,
generate_update_before: bool,
state_cleaning_enabled: bool,
index_of_count_star: int):
super(GroupAggFunction, self).__init__(
aggs_handle, key_selector, state_backend, state_value_coder, generate_update_before,
state_cleaning_enabled, index_of_count_star)
def finish_bundle(self):
for current_key, input_rows in self.buffer.items():
current_key = list(current_key)
first_row = False
self.state_backend.set_current_key(current_key)
self.state_backend.clear_cached_iterators()
accumulator_state = self.state_backend.get_value_state(
"accumulators", self.state_value_coder)
accumulators = accumulator_state.value() # type: List
start_index = 0
if accumulators is None:
for i in range(len(input_rows)):
if input_rows[i]._is_retract_msg():
start_index += 1
else:
break
if start_index == len(input_rows):
return
accumulators = self.aggs_handle.create_accumulators()
first_row = True
# set accumulators to handler first
self.aggs_handle.set_accumulators(accumulators)
# get previous aggregate result
pre_agg_value = self.aggs_handle.get_value() # type: List
for input_row in input_rows[start_index:]:
# update aggregate result and set to the newRow
if input_row._is_accumulate_msg():
# accumulate input
self.aggs_handle.accumulate(input_row)
else:
# retract input
self.aggs_handle.retract(input_row)
# get current aggregate result
new_agg_value = self.aggs_handle.get_value() # type: List
# get accumulator
accumulators = self.aggs_handle.get_accumulators()
if not self.record_counter.record_count_is_zero(accumulators):
# we aggregated at least one record for this key
# update the state
accumulator_state.update(accumulators)
# if this was not the first row and we have to emit retractions
if not first_row:
if pre_agg_value != new_agg_value:
# retract previous result
if self.generate_update_before:
# prepare UPDATE_BEFORE message for previous row
retract_row = join_row(current_key, pre_agg_value)
retract_row.set_row_kind(RowKind.UPDATE_BEFORE)
yield retract_row
# prepare UPDATE_AFTER message for new row
result_row = join_row(current_key, new_agg_value)
result_row.set_row_kind(RowKind.UPDATE_AFTER)
yield result_row
else:
# this is the first, output new result
# prepare INSERT message for new row
result_row = join_row(current_key, new_agg_value)
result_row.set_row_kind(RowKind.INSERT)
yield result_row
else:
# we retracted the last record for this key
# sent out a delete message
if not first_row:
# prepare delete message for previous row
result_row = join_row(current_key, pre_agg_value)
result_row.set_row_kind(RowKind.DELETE)
yield result_row
# and clear all state
accumulator_state.clear()
# cleanup dataview under current key
self.aggs_handle.cleanup()
self.buffer = {}
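# Illustrative sketch (not part of the original module) of the changelog that
# finish_bundle produces for a single key, assuming generate_update_before=True:
#   first record for the key           -> +I (INSERT) of the new aggregate row
#   later records that change the agg  -> -U (UPDATE_BEFORE) of the old row,
#                                         +U (UPDATE_AFTER) of the new row
#   retraction of the last record      -> -D (DELETE) of the previous row, and
#                                         the accumulator state is cleared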
class GroupTableAggFunction(GroupAggFunctionBase):
def __init__(self,
aggs_handle: TableAggsHandleFunction,
key_selector: RowKeySelector,
state_backend,
state_value_coder,
generate_update_before: bool,
state_cleaning_enabled: bool,
index_of_count_star: int):
super(GroupTableAggFunction, self).__init__(
aggs_handle, key_selector, state_backend, state_value_coder, generate_update_before,
state_cleaning_enabled, index_of_count_star)
def finish_bundle(self):
for current_key, input_rows in self.buffer.items():
current_key = list(current_key)
first_row = False
self.state_backend.set_current_key(current_key)
self.state_backend.clear_cached_iterators()
accumulator_state = self.state_backend.get_value_state(
"accumulators", self.state_value_coder)
accumulators = accumulator_state.value()
start_index = 0
if accumulators is None:
for i in range(len(input_rows)):
if input_rows[i]._is_retract_msg():
start_index += 1
else:
break
if start_index == len(input_rows):
return
accumulators = self.aggs_handle.create_accumulators()
first_row = True
# set accumulators to handler first
self.aggs_handle.set_accumulators(accumulators)
if not first_row and self.generate_update_before:
yield from self.aggs_handle.emit_value(current_key, True)
for input_row in input_rows[start_index:]:
# update aggregate result and set to the newRow
if input_row._is_accumulate_msg():
# accumulate input
self.aggs_handle.accumulate(input_row)
else:
# retract input
self.aggs_handle.retract(input_row)
# get accumulator
accumulators = self.aggs_handle.get_accumulators()
if not self.record_counter.record_count_is_zero(accumulators):
yield from self.aggs_handle.emit_value(current_key, False)
accumulator_state.update(accumulators)
else:
# and clear all state
accumulator_state.clear()
# cleanup dataview under current key
self.aggs_handle.cleanup()
self.buffer = {}
| 23,406 | 37.43514 | 97 |
py
|
flink
|
flink-master/flink-python/pyflink/fn_execution/table/window_context.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from abc import ABC, abstractmethod
from typing import Generic, TypeVar, List, Iterable
from pyflink.common.constants import MAX_LONG_VALUE
from pyflink.datastream.state import StateDescriptor, State, ValueStateDescriptor, \
ListStateDescriptor, MapStateDescriptor
from pyflink.datastream.window import TimeWindow, CountWindow
from pyflink.fn_execution.datastream.process.timerservice_impl import LegacyInternalTimerServiceImpl
from pyflink.fn_execution.coders import from_type_info, MapCoder, GenericArrayCoder
from pyflink.fn_execution.internal_state import InternalMergingState
K = TypeVar('K')
W = TypeVar('W', TimeWindow, CountWindow)
class Context(Generic[K, W], ABC):
"""
Information available in an invocation of methods of InternalWindowProcessFunction.
"""
@abstractmethod
def get_partitioned_state(self, state_descriptor: StateDescriptor) -> State:
"""
Creates a partitioned state handle, using the state backend configured for this task.
"""
pass
@abstractmethod
def current_key(self) -> K:
"""
Returns current key of current processed element.
"""
pass
@abstractmethod
def current_processing_time(self) -> int:
"""
Returns the current processing time.
"""
pass
@abstractmethod
def current_watermark(self) -> int:
"""
Returns the current event-time watermark.
"""
pass
@abstractmethod
def get_window_accumulators(self, window: W) -> List:
"""
Gets the accumulators of the given window.
"""
pass
@abstractmethod
def set_window_accumulators(self, window: W, acc: List):
"""
Sets the accumulators of the given window.
"""
pass
@abstractmethod
def clear_window_state(self, window: W):
"""
Clear window state of the given window.
"""
pass
@abstractmethod
def clear_trigger(self, window: W):
"""
Call Trigger#clear(Window) on trigger.
"""
pass
@abstractmethod
def on_merge(self, new_window: W, merged_windows: Iterable[W]):
"""
Call Trigger.on_merge() on trigger.
"""
pass
@abstractmethod
def delete_cleanup_timer(self, window: W):
"""
Deletes the cleanup timer set for the contents of the provided window.
"""
pass
class WindowContext(Context[K, W]):
"""
Context of window.
"""
def __init__(self,
window_operator,
trigger_context: 'TriggerContext',
state_backend,
state_value_coder,
timer_service: LegacyInternalTimerServiceImpl,
is_event_time: bool):
self._window_operator = window_operator
self._trigger_context = trigger_context
self._state_backend = state_backend
self.timer_service = timer_service
self.is_event_time = is_event_time
self.window_state = self._state_backend.get_value_state("window_state", state_value_coder)
def get_partitioned_state(self, state_descriptor: StateDescriptor) -> State:
return self._trigger_context.get_partitioned_state(state_descriptor)
def current_key(self) -> K:
return self._state_backend.get_current_key()
def current_processing_time(self) -> int:
return self.timer_service.current_processing_time()
def current_watermark(self) -> int:
return self.timer_service.current_watermark()
def get_window_accumulators(self, window: W) -> List:
self.window_state.set_current_namespace(window)
return self.window_state.value()
def set_window_accumulators(self, window: W, acc: List):
self.window_state.set_current_namespace(window)
self.window_state.update(acc)
def clear_window_state(self, window: W):
self.window_state.set_current_namespace(window)
self.window_state.clear()
def clear_trigger(self, window: W):
self._trigger_context.window = window
self._trigger_context.clear()
def on_merge(self, new_window: W, merged_windows: Iterable[W]):
self._trigger_context.window = new_window
self._trigger_context.merged_windows = merged_windows
self._trigger_context.on_merge()
def delete_cleanup_timer(self, window: W):
cleanup_time = self._window_operator.cleanup_time(window)
if cleanup_time == MAX_LONG_VALUE:
# no need to clean up because we didn't set one
return
if self.is_event_time:
self._trigger_context.delete_event_time_timer(cleanup_time)
else:
self._trigger_context.delete_processing_time_timer(cleanup_time)
class TriggerContext(object):
"""
TriggerContext is a utility for handling Trigger invocations. It can be reused by setting the
key and window fields. No internal state must be kept in the TriggerContext.
"""
def __init__(self,
trigger,
timer_service: LegacyInternalTimerServiceImpl[W],
state_backend):
self._trigger = trigger
self._timer_service = timer_service
self._state_backend = state_backend
self.window = None # type: W
self.merged_windows = None # type: Iterable[W]
def open(self):
self._trigger.open(self)
def on_element(self, row, timestamp: int) -> bool:
return self._trigger.on_element(row, timestamp, self.window)
def on_processing_time(self, timestamp: int) -> bool:
return self._trigger.on_processing_time(timestamp, self.window)
def on_event_time(self, timestamp: int) -> bool:
return self._trigger.on_event_time(timestamp, self.window)
def on_merge(self):
self._trigger.on_merge(self.window, self)
def get_current_processing_time(self) -> int:
return self._timer_service.current_processing_time()
def get_current_watermark(self) -> int:
return self._timer_service.current_watermark()
def register_processing_time_timer(self, time: int):
self._timer_service.register_processing_time_timer(self.window, time)
def register_event_time_timer(self, time: int):
self._timer_service.register_event_time_timer(self.window, time)
def delete_processing_time_timer(self, time: int):
self._timer_service.delete_processing_time_timer(self.window, time)
def delete_event_time_timer(self, time: int):
self._timer_service.delete_event_time_timer(self.window, time)
def clear(self):
self._trigger.clear(self.window)
def get_partitioned_state(self, state_descriptor: StateDescriptor) -> State:
if isinstance(state_descriptor, ValueStateDescriptor):
state = self._state_backend.get_value_state(
state_descriptor.name, from_type_info(state_descriptor.type_info))
elif isinstance(state_descriptor, ListStateDescriptor):
array_coder = from_type_info(state_descriptor.type_info) # type: GenericArrayCoder
state = self._state_backend.get_list_state(
state_descriptor.name, array_coder._elem_coder)
elif isinstance(state_descriptor, MapStateDescriptor):
map_coder = from_type_info(state_descriptor.type_info) # type: MapCoder
key_coder = map_coder._key_coder
value_coder = map_coder._value_coder
state = self._state_backend.get_map_state(
state_descriptor.name, key_coder, value_coder)
else:
raise Exception("Unknown supported StateDescriptor %s" % state_descriptor)
state.set_current_namespace(self.window)
return state
def merge_partitioned_state(self, state_descriptor: StateDescriptor):
        if self.merged_windows:
state = self.get_partitioned_state(state_descriptor)
if isinstance(state, InternalMergingState):
state.merge_namespaces(self.window, self.merged_windows)
else:
raise Exception("The given state descriptor does not refer to a mergeable state"
" (MergingState)")
| 9,220 | 35.737052 | 100 |
py
|
flink
|
flink-master/flink-python/pyflink/fn_execution/table/window_assigner.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import math
from abc import ABC, abstractmethod
from typing import Generic, List, Any, Iterable, Set
from pyflink.common.typeinfo import Types
from pyflink.datastream.state import ValueStateDescriptor, ValueState
from pyflink.datastream.window import TimeWindow, CountWindow
from pyflink.fn_execution.table.window_context import Context, W
class WindowAssigner(Generic[W], ABC):
"""
A WindowAssigner assigns zero or more Windows to an element.
    In a window operation, elements are grouped by their key (if available) and by the windows to
    which they were assigned. The set of elements with the same key and window is called a pane.
    When a Trigger decides that a certain pane should fire, the window function is applied to
    produce output elements for that pane.
"""
def open(self, ctx: Context[Any, W]):
"""
Initialization method for the function. It is called before the actual working methods.
"""
pass
@abstractmethod
def assign_windows(self, element: List, timestamp: int) -> Iterable[W]:
"""
Given the timestamp and element, returns the set of windows into which it should be placed.
:param element: The element to which windows should be assigned.
        :param timestamp: The timestamp of the element when is_event_time() returns True, or
                          the current system time when is_event_time() returns False.
"""
pass
@abstractmethod
def is_event_time(self) -> bool:
"""
Returns True if elements are assigned to windows based on event time, False otherwise.
"""
pass
class PanedWindowAssigner(WindowAssigner[W], ABC):
"""
    A WindowAssigner whose windows can be split into panes.
"""
@abstractmethod
def assign_pane(self, element, timestamp: int) -> W:
pass
@abstractmethod
def split_into_panes(self, window: W) -> Iterable[W]:
pass
@abstractmethod
def get_last_window(self, pane: W) -> W:
pass
class MergingWindowAssigner(WindowAssigner[W], ABC):
"""
A WindowAssigner that can merge windows.
"""
class MergeCallback:
"""
Callback to be used in merge_windows(W, List[W], MergeCallback) for
specifying which windows should be merged.
"""
@abstractmethod
def merge(self, merge_result: W, to_be_merged: Iterable[W]):
"""
Specifies that the given windows should be merged into the result window.
:param merge_result: The resulting merged window.
:param to_be_merged: The list of windows that should be merged into one window.
"""
pass
@abstractmethod
def merge_windows(self, new_window: W, sorted_windows: List[W], merge_callback: MergeCallback):
"""
Determines which windows (if any) should be merged.
:param new_window: The new window.
:param sorted_windows: The sorted window candidates.
:param merge_callback: A callback that can be invoked to signal which windows should be
merged.
"""
pass
class TumblingWindowAssigner(WindowAssigner[TimeWindow]):
"""
A WindowAssigner that windows elements into fixed-size windows based on the timestamp of
the elements. Windows cannot overlap.
"""
def __init__(self, size: int, offset: int, is_event_time: bool):
self._size = size
self._offset = offset
self._is_event_time = is_event_time
def assign_windows(self, element: List, timestamp: int) -> Iterable[TimeWindow]:
start = TimeWindow.get_window_start_with_offset(timestamp, self._offset, self._size)
return [TimeWindow(start, start + self._size)]
def is_event_time(self) -> bool:
return self._is_event_time
def __repr__(self):
return "TumblingWindow(%s)" % self._size
class CountTumblingWindowAssigner(WindowAssigner[CountWindow]):
"""
A WindowAssigner that windows elements into fixed-size windows based on the count number
of the elements. Windows cannot overlap.
"""
def __init__(self, size: int):
self._size = size
self._count = None # type: ValueState
def open(self, ctx: Context[Any, CountWindow]):
value_state_descriptor = ValueStateDescriptor('tumble-count-assigner', Types.LONG())
self._count = ctx.get_partitioned_state(value_state_descriptor)
def assign_windows(self, element: List, timestamp: int) -> Iterable[CountWindow]:
count_value = self._count.value()
if count_value is None:
current_count = 0
else:
current_count = count_value
id = current_count // self._size
self._count.update(current_count + 1)
return [CountWindow(id)]
def is_event_time(self) -> bool:
return False
def __repr__(self):
return "CountTumblingWindow(%s)" % self._size
class SlidingWindowAssigner(PanedWindowAssigner[TimeWindow]):
"""
A WindowAssigner that windows elements into sliding windows based on the timestamp of the
elements. Windows can possibly overlap.
"""
def __init__(self, size: int, slide: int, offset: int, is_event_time: bool):
self._size = size
self._slide = slide
self._offset = offset
self._is_event_time = is_event_time
self._pane_size = math.gcd(size, slide)
self._num_panes_per_window = size // self._pane_size
def assign_pane(self, element, timestamp: int) -> TimeWindow:
start = TimeWindow.get_window_start_with_offset(timestamp, self._offset, self._pane_size)
return TimeWindow(start, start + self._pane_size)
def split_into_panes(self, window: W) -> Iterable[TimeWindow]:
start = window.start
for i in range(self._num_panes_per_window):
yield TimeWindow(start, start + self._pane_size)
start += self._pane_size
def get_last_window(self, pane: W) -> TimeWindow:
last_start = TimeWindow.get_window_start_with_offset(pane.start, self._offset, self._slide)
return TimeWindow(last_start, last_start + self._size)
def assign_windows(self, element: List, timestamp: int) -> Iterable[TimeWindow]:
last_start = TimeWindow.get_window_start_with_offset(timestamp, self._offset, self._slide)
windows = [TimeWindow(start, start + self._size)
for start in range(last_start, timestamp - self._size, -self._slide)]
return windows
def is_event_time(self) -> bool:
return self._is_event_time
def __repr__(self):
return "SlidingWindowAssigner(%s, %s)" % (self._size, self._slide)
class CountSlidingWindowAssigner(WindowAssigner[CountWindow]):
"""
A WindowAssigner that windows elements into sliding windows based on the count number of
the elements. Windows can possibly overlap.
"""
def __init__(self, size, slide):
self._size = size
self._slide = slide
self._count = None # type: ValueState
def open(self, ctx: Context[Any, CountWindow]):
count_descriptor = ValueStateDescriptor('slide-count-assigner', Types.LONG())
self._count = ctx.get_partitioned_state(count_descriptor)
def assign_windows(self, element: List, timestamp: int) -> Iterable[W]:
count_value = self._count.value()
if count_value is None:
current_count = 0
else:
current_count = count_value
self._count.update(current_count + 1)
last_id = current_count // self._slide
last_start = last_id * self._slide
last_end = last_start + self._size - 1
windows = []
while last_id >= 0 and last_start <= current_count <= last_end:
if last_start <= current_count <= last_end:
windows.append(CountWindow(last_id))
last_id -= 1
last_start -= self._slide
last_end -= self._slide
return windows
def is_event_time(self) -> bool:
return False
def __repr__(self):
return "CountSlidingWindowAssigner(%s, %s)" % (self._size, self._slide)
class SessionWindowAssigner(MergingWindowAssigner[TimeWindow]):
"""
WindowAssigner that windows elements into sessions based on the timestamp. Windows cannot
overlap.
"""
def __init__(self, session_gap: int, is_event_time: bool):
self._session_gap = session_gap
self._is_event_time = is_event_time
def merge_windows(self, new_window: W, sorted_windows: List[TimeWindow],
merge_callback: MergingWindowAssigner.MergeCallback):
ceiling = self._ceiling_window(new_window, sorted_windows)
floor = self._floor_window(new_window, sorted_windows)
merge_result = new_window
merged_windows = set()
if ceiling:
merge_result = self._merge_window(merge_result, ceiling, merged_windows)
if floor:
merge_result = self._merge_window(merge_result, floor, merged_windows)
if merged_windows:
merged_windows.add(new_window)
merge_callback.merge(merge_result, merged_windows)
def assign_windows(self, element: List, timestamp: int) -> Iterable[TimeWindow]:
return [TimeWindow(timestamp, timestamp + self._session_gap)]
def is_event_time(self) -> bool:
return self._is_event_time
@staticmethod
def _ceiling_window(new_window: TimeWindow, sorted_windows: List[TimeWindow]):
if not sorted_windows:
return None
window_num = len(sorted_windows)
if sorted_windows[0] >= new_window:
return None
for i in range(window_num - 1):
if sorted_windows[i] <= new_window <= sorted_windows[i + 1]:
return sorted_windows[i]
return sorted_windows[window_num - 1]
@staticmethod
def _floor_window(new_window: TimeWindow, sorted_windows: List[TimeWindow]):
if not sorted_windows:
return None
window_num = len(sorted_windows)
if sorted_windows[window_num - 1] <= new_window:
return None
for i in range(window_num - 1):
if sorted_windows[i] <= new_window <= sorted_windows[i + 1]:
return sorted_windows[i + 1]
return sorted_windows[0]
# Merge curWindow and other, return a new window which covers curWindow and other if they are
# overlapped. Otherwise, returns the curWindow itself.
@staticmethod
def _merge_window(cur_window: TimeWindow, other: TimeWindow, merged_windows: Set[TimeWindow]):
if cur_window.intersects(other):
merged_windows.add(other)
return cur_window.cover(other)
else:
return cur_window
def __repr__(self):
return "SessionWindowAssigner(%s)" % self._session_gap
| 11,840 | 36.590476 | 100 |
py
|
flink
|
flink-master/flink-python/pyflink/fn_execution/table/state_data_view.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from abc import ABC, abstractmethod
from typing import TypeVar, Generic, Union
from pyflink.datastream.state import ListState, MapState
from pyflink.fn_execution.coders import from_proto, PickleCoder
from pyflink.fn_execution.internal_state import InternalListState, InternalMapState
from pyflink.fn_execution.utils.operation_utils import is_built_in_function, load_aggregate_function
from pyflink.table import FunctionContext
from pyflink.table.data_view import ListView, MapView, DataView
def extract_data_view_specs_from_accumulator(current_index, accumulator):
# for built in functions we extract the data view specs from their accumulator
i = -1
extracted_specs = []
for field in accumulator:
i += 1
# TODO: infer the coder from the input types and output type of the built-in functions
if isinstance(field, MapView):
extracted_specs.append(MapViewSpec(
"builtInAgg%df%d" % (current_index, i), i, PickleCoder(), PickleCoder()))
elif isinstance(field, ListView):
extracted_specs.append(ListViewSpec(
"builtInAgg%df%d" % (current_index, i), i, PickleCoder()))
return extracted_specs
def extract_data_view_specs(udfs):
extracted_udf_data_view_specs = []
current_index = -1
for udf in udfs:
current_index += 1
udf_data_view_specs_proto = udf.specs
if not udf_data_view_specs_proto:
if is_built_in_function(udf.payload):
built_in_function = load_aggregate_function(udf.payload)
accumulator = built_in_function.create_accumulator()
extracted_udf_data_view_specs.append(
extract_data_view_specs_from_accumulator(current_index, accumulator))
else:
extracted_udf_data_view_specs.append([])
else:
extracted_specs = []
for spec_proto in udf_data_view_specs_proto:
state_id = spec_proto.name
field_index = spec_proto.field_index
if spec_proto.HasField("list_view"):
element_coder = from_proto(spec_proto.list_view.element_type)
extracted_specs.append(ListViewSpec(state_id, field_index, element_coder))
elif spec_proto.HasField("map_view"):
key_coder = from_proto(spec_proto.map_view.key_type)
value_coder = from_proto(spec_proto.map_view.value_type)
extracted_specs.append(
MapViewSpec(state_id, field_index, key_coder, value_coder))
else:
raise Exception("Unsupported data view spec type: " + spec_proto.type)
extracted_udf_data_view_specs.append(extracted_specs)
if all([len(i) == 0 for i in extracted_udf_data_view_specs]):
return []
return extracted_udf_data_view_specs
N = TypeVar('N')
class StateDataView(DataView, Generic[N]):
@abstractmethod
def set_current_namespace(self, namespace: N):
"""
Sets current namespace for state.
"""
pass
class StateListView(ListView, StateDataView[N], ABC):
def __init__(self, list_state: Union[ListState, InternalListState]):
super().__init__()
self._list_state = list_state
def get(self):
return self._list_state.get()
def add(self, value):
self._list_state.add(value)
def add_all(self, values):
self._list_state.add_all(values)
def clear(self):
self._list_state.clear()
def __hash__(self) -> int:
        # a list is unhashable, so hash a tuple of the view's elements
        return hash(tuple(self.get()))
class KeyedStateListView(StateListView[N]):
"""
KeyedStateListView is a default implementation of StateListView whose underlying
representation is a keyed state.
"""
def __init__(self, list_state: ListState):
super(KeyedStateListView, self).__init__(list_state)
def set_current_namespace(self, namespace: N):
raise Exception("KeyedStateListView doesn't support set_current_namespace")
class NamespacedStateListView(StateListView[N]):
"""
NamespacedStateListView is a StateListView whose underlying representation is a keyed and
namespaced state. It also supports changing current namespace.
"""
def __init__(self, list_state: InternalListState):
super(NamespacedStateListView, self).__init__(list_state)
def set_current_namespace(self, namespace: N):
self._list_state.set_current_namespace(namespace)
class StateMapView(MapView, StateDataView[N], ABC):
def __init__(self, map_state: Union[MapState, InternalMapState]):
super().__init__()
self._map_state = map_state
def get(self, key):
return self._map_state.get(key)
def put(self, key, value) -> None:
self._map_state.put(key, value)
def put_all(self, dict_value) -> None:
self._map_state.put_all(dict_value)
def remove(self, key) -> None:
self._map_state.remove(key)
def contains(self, key) -> bool:
return self._map_state.contains(key)
def items(self):
return self._map_state.items()
def keys(self):
return self._map_state.keys()
def values(self):
return self._map_state.values()
def is_empty(self) -> bool:
return self._map_state.is_empty()
def clear(self) -> None:
return self._map_state.clear()
class KeyedStateMapView(StateMapView[N]):
"""
KeyedStateMapView is a default implementation of StateMapView whose underlying
representation is a keyed state.
"""
def __init__(self, map_state: MapState):
super(KeyedStateMapView, self).__init__(map_state)
def set_current_namespace(self, namespace: N):
raise Exception("KeyedStateMapView doesn't support set_current_namespace")
class NamespacedStateMapView(StateMapView[N]):
"""
NamespacedStateMapView is a StateMapView whose underlying representation is a keyed and
namespaced state. It also supports changing current namespace.
"""
def __init__(self, map_state: InternalMapState):
super(NamespacedStateMapView, self).__init__(map_state)
def set_current_namespace(self, namespace: N):
self._map_state.set_current_namespace(namespace)
class DataViewSpec(object):
def __init__(self, state_id, field_index):
self.state_id = state_id
self.field_index = field_index
class ListViewSpec(DataViewSpec):
def __init__(self, state_id, field_index, element_coder):
super(ListViewSpec, self).__init__(state_id, field_index)
self.element_coder = element_coder
class MapViewSpec(DataViewSpec):
def __init__(self, state_id, field_index, key_coder, value_coder):
super(MapViewSpec, self).__init__(state_id, field_index)
self.key_coder = key_coder
self.value_coder = value_coder
class StateDataViewStore(ABC):
"""
This interface contains methods for registering StateDataView with a managed store.
"""
def __init__(self,
function_context: FunctionContext,
keyed_state_backend):
self._function_context = function_context
self._keyed_state_backend = keyed_state_backend
def get_runtime_context(self):
return self._function_context
@abstractmethod
def get_state_list_view(self, state_name, element_coder):
"""
Creates a state list view.
:param state_name: The name of underlying state of the list view.
:param element_coder: The element coder
:return: a keyed list state
"""
pass
@abstractmethod
def get_state_map_view(self, state_name, key_coder, value_coder):
"""
Creates a state map view.
:param state_name: The name of underlying state of the map view.
:param key_coder: The key coder
:param value_coder: The value coder
:return: a keyed map state
"""
pass
class PerKeyStateDataViewStore(StateDataViewStore):
"""
Default implementation of StateDataViewStore.
"""
def __init__(self,
function_context: FunctionContext,
keyed_state_backend):
super(PerKeyStateDataViewStore, self).__init__(function_context, keyed_state_backend)
def get_state_list_view(self, state_name, element_coder):
return KeyedStateListView(
self._keyed_state_backend.get_list_state(state_name, element_coder))
def get_state_map_view(self, state_name, key_coder, value_coder):
return KeyedStateMapView(
self._keyed_state_backend.get_map_state(state_name, key_coder, value_coder))
class PerWindowStateDataViewStore(StateDataViewStore):
"""
An implementation of StateDataViewStore for window aggregates which forwards the state
registration to an underlying RemoteKeyedStateBackend. The state created by this store has the
ability to switch window namespaces.
"""
def __init__(self,
function_context: FunctionContext,
keyed_state_backend):
super(PerWindowStateDataViewStore, self).__init__(function_context, keyed_state_backend)
def get_state_list_view(self, state_name, element_coder):
return NamespacedStateListView(
self._keyed_state_backend.get_list_state(state_name, element_coder))
def get_state_map_view(self, state_name, key_coder, value_coder):
return NamespacedStateMapView(
self._keyed_state_backend.get_map_state(state_name, key_coder, value_coder))
| 10,571 | 34.122924 | 100 |
py
|
flink
|
flink-master/flink-python/pyflink/fn_execution/table/__init__.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
| 958 | 52.277778 | 80 |
py
|
flink
|
flink-master/flink-python/pyflink/fn_execution/table/operations.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import abc
from functools import reduce
from itertools import chain
from typing import Tuple
from pyflink import fn_execution
if fn_execution.PYFLINK_CYTHON_ENABLED:
from pyflink.fn_execution.table.aggregate_fast import RowKeySelector, \
SimpleAggsHandleFunction, GroupAggFunction, DistinctViewDescriptor, \
SimpleTableAggsHandleFunction, GroupTableAggFunction
from pyflink.fn_execution.table.window_aggregate_fast import \
SimpleNamespaceAggsHandleFunction, GroupWindowAggFunction
from pyflink.fn_execution.coder_impl_fast import InternalRow
else:
from pyflink.fn_execution.table.aggregate_slow import RowKeySelector, \
SimpleAggsHandleFunction, GroupAggFunction, DistinctViewDescriptor, \
SimpleTableAggsHandleFunction, GroupTableAggFunction
from pyflink.fn_execution.table.window_aggregate_slow import \
SimpleNamespaceAggsHandleFunction, GroupWindowAggFunction
from pyflink.fn_execution.coders import DataViewFilterCoder, PickleCoder
from pyflink.fn_execution.datastream.timerservice import InternalTimer
from pyflink.fn_execution.datastream.operations import Operation
from pyflink.fn_execution.datastream.process.timerservice_impl import (
TimerOperandType, InternalTimerImpl)
from pyflink.fn_execution.table.state_data_view import extract_data_view_specs
from pyflink.fn_execution.table.window_assigner import TumblingWindowAssigner, \
CountTumblingWindowAssigner, SlidingWindowAssigner, CountSlidingWindowAssigner, \
SessionWindowAssigner
from pyflink.fn_execution.table.window_trigger import EventTimeTrigger, ProcessingTimeTrigger, \
CountTrigger
from pyflink.fn_execution.utils import operation_utils
from pyflink.fn_execution.utils.operation_utils import extract_user_defined_aggregate_function
from pyflink.fn_execution.metrics.process.metric_impl import GenericMetricGroup
from pyflink.table import FunctionContext, Row
# UDF
SCALAR_FUNCTION_URN = "flink:transform:scalar_function:v1"
# UDTF
TABLE_FUNCTION_URN = "flink:transform:table_function:v1"
# UDAF
STREAM_GROUP_AGGREGATE_URN = "flink:transform:stream_group_aggregate:v1"
STREAM_GROUP_TABLE_AGGREGATE_URN = "flink:transform:stream_group_table_aggregate:v1"
STREAM_GROUP_WINDOW_AGGREGATE_URN = "flink:transform:stream_group_window_aggregate:v1"
# Pandas UDAF
PANDAS_AGGREGATE_FUNCTION_URN = "flink:transform:aggregate_function:arrow:v1"
PANDAS_BATCH_OVER_WINDOW_AGGREGATE_FUNCTION_URN = \
"flink:transform:batch_over_window_aggregate_function:arrow:v1"
class BundleOperation(object):
def finish_bundle(self):
raise NotImplementedError
class BaseOperation(Operation):
def __init__(self, serialized_fn):
if serialized_fn.metric_enabled:
self.base_metric_group = GenericMetricGroup(None, None)
else:
self.base_metric_group = None
self.func, self.user_defined_funcs = self.generate_func(serialized_fn)
self.job_parameters = {p.key: p.value for p in serialized_fn.job_parameters}
def finish(self):
self._update_gauge(self.base_metric_group)
def _update_gauge(self, base_metric_group):
if base_metric_group is not None:
for name in base_metric_group._flink_gauge:
flink_gauge = base_metric_group._flink_gauge[name]
beam_gauge = base_metric_group._beam_gauge[name]
beam_gauge.set(flink_gauge())
for sub_group in base_metric_group._sub_groups:
self._update_gauge(sub_group)
def process_element(self, value):
return self.func(value)
def open(self):
for user_defined_func in self.user_defined_funcs:
if hasattr(user_defined_func, 'open'):
user_defined_func.open(FunctionContext(self.base_metric_group, self.job_parameters))
def close(self):
for user_defined_func in self.user_defined_funcs:
if hasattr(user_defined_func, 'close'):
user_defined_func.close()
@abc.abstractmethod
def generate_func(self, serialized_fn) -> Tuple:
pass
class ScalarFunctionOperation(BaseOperation):
def __init__(self, serialized_fn, one_arg_optimization=False, one_result_optimization=False):
self._one_arg_optimization = one_arg_optimization
self._one_result_optimization = one_result_optimization
super(ScalarFunctionOperation, self).__init__(serialized_fn)
def generate_func(self, serialized_fn):
"""
Generates a lambda function based on udfs.
:param serialized_fn: serialized function which contains a list of the proto
representation of the Python :class:`ScalarFunction`
:return: the generated lambda function
"""
scalar_functions, variable_dict, user_defined_funcs = reduce(
lambda x, y: (
','.join([x[0], y[0]]),
dict(chain(x[1].items(), y[1].items())),
x[2] + y[2]),
[operation_utils.extract_user_defined_function(
udf, one_arg_optimization=self._one_arg_optimization)
for udf in serialized_fn.udfs])
if self._one_result_optimization:
func_str = 'lambda value: %s' % scalar_functions
else:
func_str = 'lambda value: [%s]' % scalar_functions
generate_func = eval(func_str, variable_dict)
return generate_func, user_defined_funcs
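# Illustrative sketch (not part of the original module; the names are
# hypothetical): if two scalar UDFs are extracted as the call expressions
# "f0(value[0])" and "f1(value[1], value[2])", the reduce above concatenates
# them so that
#   func_str == 'lambda value: [f0(value[0]),f1(value[1], value[2])]'
# (without the surrounding list when one_result_optimization is enabled), and
# eval() turns this string into the per-element function used by
# process_element.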
class TableFunctionOperation(BaseOperation):
def __init__(self, serialized_fn):
super(TableFunctionOperation, self).__init__(serialized_fn)
def generate_func(self, serialized_fn):
"""
Generates a lambda function based on udtfs.
:param serialized_fn: serialized function which contains the proto representation of
the Python :class:`TableFunction`
:return: the generated lambda function
"""
table_function, variable_dict, user_defined_funcs = \
operation_utils.extract_user_defined_function(serialized_fn.udfs[0])
variable_dict['normalize_table_function_result'] = \
operation_utils.normalize_table_function_result
generate_func = eval('lambda value: normalize_table_function_result(%s)' % table_function,
variable_dict)
return generate_func, user_defined_funcs
class PandasAggregateFunctionOperation(BaseOperation):
def __init__(self, serialized_fn):
super(PandasAggregateFunctionOperation, self).__init__(serialized_fn)
def generate_func(self, serialized_fn):
pandas_functions, variable_dict, user_defined_funcs = reduce(
lambda x, y: (
','.join([x[0], y[0]]),
dict(chain(x[1].items(), y[1].items())),
x[2] + y[2]),
[operation_utils.extract_user_defined_function(udf, True)
for udf in serialized_fn.udfs])
variable_dict['normalize_pandas_result'] = operation_utils.normalize_pandas_result
generate_func = eval('lambda value: normalize_pandas_result([%s])' %
pandas_functions, variable_dict)
return generate_func, user_defined_funcs
class PandasBatchOverWindowAggregateFunctionOperation(BaseOperation):
def __init__(self, serialized_fn):
super(PandasBatchOverWindowAggregateFunctionOperation, self).__init__(serialized_fn)
self.windows = [window for window in serialized_fn.windows]
# the index among all the bounded range over window
self.bounded_range_window_index = [-1 for _ in range(len(self.windows))]
# Whether the specified position window is a bounded range window.
self.is_bounded_range_window = []
from pyflink.fn_execution import flink_fn_execution_pb2
window_types = flink_fn_execution_pb2.OverWindow
bounded_range_window_nums = 0
for i, window in enumerate(self.windows):
window_type = window.window_type
if (window_type == window_types.RANGE_UNBOUNDED_PRECEDING) or (
window_type == window_types.RANGE_UNBOUNDED_FOLLOWING) or (
window_type == window_types.RANGE_SLIDING):
self.bounded_range_window_index[i] = bounded_range_window_nums
self.is_bounded_range_window.append(True)
bounded_range_window_nums += 1
else:
self.is_bounded_range_window.append(False)
def generate_func(self, serialized_fn):
user_defined_funcs = []
self.window_indexes = []
self.mapper = []
for udf in serialized_fn.udfs:
pandas_agg_function, variable_dict, user_defined_func, window_index = \
operation_utils.extract_over_window_user_defined_function(udf)
user_defined_funcs.extend(user_defined_func)
self.window_indexes.append(window_index)
self.mapper.append(eval('lambda value: %s' % pandas_agg_function, variable_dict))
return self.wrapped_over_window_function, user_defined_funcs
def wrapped_over_window_function(self, boundaries_series):
import pandas as pd
from pyflink.fn_execution import flink_fn_execution_pb2
OverWindow = flink_fn_execution_pb2.OverWindow
input_series = boundaries_series[-1]
        # the number of rows in the arrow format data
input_cnt = len(input_series[0])
results = []
# loop every agg func
for i in range(len(self.window_indexes)):
window_index = self.window_indexes[i]
# the over window which the agg function belongs to
window = self.windows[window_index]
window_type = window.window_type
func = self.mapper[i]
result = []
if self.is_bounded_range_window[window_index]:
window_boundaries = boundaries_series[
self.bounded_range_window_index[window_index]]
if window_type == OverWindow.RANGE_UNBOUNDED_PRECEDING:
# range unbounded preceding window
for j in range(input_cnt):
end = window_boundaries[j]
series_slices = [s.iloc[:end] for s in input_series]
result.append(func(series_slices))
elif window_type == OverWindow.RANGE_UNBOUNDED_FOLLOWING:
# range unbounded following window
for j in range(input_cnt):
start = window_boundaries[j]
series_slices = [s.iloc[start:] for s in input_series]
result.append(func(series_slices))
else:
# range sliding window
for j in range(input_cnt):
start = window_boundaries[j * 2]
end = window_boundaries[j * 2 + 1]
series_slices = [s.iloc[start:end] for s in input_series]
result.append(func(series_slices))
else:
# unbounded range window or unbounded row window
if (window_type == OverWindow.RANGE_UNBOUNDED) or (
window_type == OverWindow.ROW_UNBOUNDED):
series_slices = [s.iloc[:] for s in input_series]
func_result = func(series_slices)
result = [func_result for _ in range(input_cnt)]
elif window_type == OverWindow.ROW_UNBOUNDED_PRECEDING:
# row unbounded preceding window
window_end = window.upper_boundary
for j in range(input_cnt):
end = min(j + window_end + 1, input_cnt)
series_slices = [s.iloc[: end] for s in input_series]
result.append(func(series_slices))
elif window_type == OverWindow.ROW_UNBOUNDED_FOLLOWING:
# row unbounded following window
window_start = window.lower_boundary
for j in range(input_cnt):
start = max(j + window_start, 0)
series_slices = [s.iloc[start: input_cnt] for s in input_series]
result.append(func(series_slices))
else:
# row sliding window
window_start = window.lower_boundary
window_end = window.upper_boundary
for j in range(input_cnt):
start = max(j + window_start, 0)
end = min(j + window_end + 1, input_cnt)
series_slices = [s.iloc[start: end] for s in input_series]
result.append(func(series_slices))
results.append(pd.Series(result))
return results
class BaseStatefulOperation(BaseOperation, abc.ABC):
def __init__(self, serialized_fn, keyed_state_backend):
self.keyed_state_backend = keyed_state_backend
super(BaseStatefulOperation, self).__init__(serialized_fn)
def finish(self):
super().finish()
if self.keyed_state_backend:
self.keyed_state_backend.commit()
NORMAL_RECORD = 0
TRIGGER_TIMER = 1
REGISTER_EVENT_TIMER = 0
REGISTER_PROCESSING_TIMER = 1
class AbstractStreamGroupAggregateOperation(BaseStatefulOperation):
def __init__(self, serialized_fn, keyed_state_backend):
self.generate_update_before = serialized_fn.generate_update_before
self.grouping = [i for i in serialized_fn.grouping]
self.group_agg_function = None
        # If the upstream generates retract messages, we need to add an additional count1() agg
        # to track the count of currently accumulated messages. If all the messages are
        # retracted, we need to send a DELETE message to the downstream.
self.index_of_count_star = serialized_fn.index_of_count_star
self.count_star_inserted = serialized_fn.count_star_inserted
self.state_cache_size = serialized_fn.state_cache_size
self.state_cleaning_enabled = serialized_fn.state_cleaning_enabled
self.data_view_specs = extract_data_view_specs(serialized_fn.udfs)
self.job_parameters = {p.key: p.value for p in serialized_fn.job_parameters}
super(AbstractStreamGroupAggregateOperation, self).__init__(
serialized_fn, keyed_state_backend)
def open(self):
self.group_agg_function.open(FunctionContext(self.base_metric_group, self.job_parameters))
def close(self):
self.group_agg_function.close()
def generate_func(self, serialized_fn):
user_defined_aggs = []
input_extractors = []
filter_args = []
        # stores the indexes of the distinct views used by the agg functions
distinct_indexes = []
        # stores the indexes of the functions which share the same distinct view
        # and their filter args
distinct_info_dict = {}
for i in range(len(serialized_fn.udfs)):
user_defined_agg, input_extractor, filter_arg, distinct_index = \
extract_user_defined_aggregate_function(
i, serialized_fn.udfs[i], distinct_info_dict)
user_defined_aggs.append(user_defined_agg)
input_extractors.append(input_extractor)
filter_args.append(filter_arg)
distinct_indexes.append(distinct_index)
distinct_view_descriptors = {}
for agg_index_list, filter_arg_list in distinct_info_dict.values():
if -1 in filter_arg_list:
                # If there is a call without a filter, we don't need to check the filter
                # before writing the distinct data view.
filter_arg_list = []
            # use the agg index of the first function as the key of the shared distinct view
distinct_view_descriptors[agg_index_list[0]] = DistinctViewDescriptor(
input_extractors[agg_index_list[0]], filter_arg_list)
key_selector = RowKeySelector(self.grouping)
if len(self.data_view_specs) > 0:
state_value_coder = DataViewFilterCoder(self.data_view_specs)
else:
state_value_coder = PickleCoder()
self.group_agg_function = self.create_process_function(
user_defined_aggs, input_extractors, filter_args, distinct_indexes,
distinct_view_descriptors, key_selector, state_value_coder)
return self.process_element_or_timer, []
def process_element_or_timer(self, input_data: Tuple[int, Row, int, Row]):
        # the structure of the input data:
        # [element_type, element (for process_element), timestamp (for timer), key (for timer)]
        # all the fields are nullable except the "element_type"
if input_data[0] == NORMAL_RECORD:
if fn_execution.PYFLINK_CYTHON_ENABLED:
row = InternalRow.from_row(input_data[1])
else:
row = input_data[1]
self.group_agg_function.process_element(row)
else:
if fn_execution.PYFLINK_CYTHON_ENABLED:
timer = InternalRow.from_row(input_data[3])
else:
timer = input_data[3]
self.group_agg_function.on_timer(timer)
@abc.abstractmethod
def create_process_function(self, user_defined_aggs, input_extractors, filter_args,
distinct_indexes, distinct_view_descriptors, key_selector,
state_value_coder):
pass
class StreamGroupAggregateOperation(AbstractStreamGroupAggregateOperation, BundleOperation):
def __init__(self, serialized_fn, keyed_state_backend):
super(StreamGroupAggregateOperation, self).__init__(serialized_fn, keyed_state_backend)
def finish_bundle(self):
return self.group_agg_function.finish_bundle()
def create_process_function(self, user_defined_aggs, input_extractors, filter_args,
distinct_indexes, distinct_view_descriptors, key_selector,
state_value_coder):
aggs_handler_function = SimpleAggsHandleFunction(
user_defined_aggs,
input_extractors,
self.index_of_count_star,
self.count_star_inserted,
self.data_view_specs,
filter_args,
distinct_indexes,
distinct_view_descriptors)
return GroupAggFunction(
aggs_handler_function,
key_selector,
self.keyed_state_backend,
state_value_coder,
self.generate_update_before,
self.state_cleaning_enabled,
self.index_of_count_star)
class StreamGroupTableAggregateOperation(AbstractStreamGroupAggregateOperation, BundleOperation):
def __init__(self, serialized_fn, keyed_state_backend):
super(StreamGroupTableAggregateOperation, self).__init__(serialized_fn, keyed_state_backend)
def finish_bundle(self):
return self.group_agg_function.finish_bundle()
def create_process_function(self, user_defined_aggs, input_extractors, filter_args,
distinct_indexes, distinct_view_descriptors, key_selector,
state_value_coder):
aggs_handler_function = SimpleTableAggsHandleFunction(
user_defined_aggs,
input_extractors,
self.data_view_specs,
filter_args,
distinct_indexes,
distinct_view_descriptors)
return GroupTableAggFunction(
aggs_handler_function,
key_selector,
self.keyed_state_backend,
state_value_coder,
self.generate_update_before,
self.state_cleaning_enabled,
self.index_of_count_star)
class StreamGroupWindowAggregateOperation(AbstractStreamGroupAggregateOperation):
def __init__(self, serialized_fn, keyed_state_backend):
self._window = serialized_fn.group_window
self._named_property_extractor = self._create_named_property_function()
self._is_time_window = None
self._reuse_timer_data = Row()
self._reuse_key_data = Row()
super(StreamGroupWindowAggregateOperation, self).__init__(
serialized_fn, keyed_state_backend)
def create_process_function(self, user_defined_aggs, input_extractors, filter_args,
distinct_indexes, distinct_view_descriptors, key_selector,
state_value_coder):
from pyflink.fn_execution import flink_fn_execution_pb2
self._is_time_window = self._window.is_time_window
self._namespace_coder = self.keyed_state_backend._namespace_coder_impl
if self._window.window_type == flink_fn_execution_pb2.GroupWindow.TUMBLING_GROUP_WINDOW:
if self._is_time_window:
window_assigner = TumblingWindowAssigner(
self._window.window_size, 0, self._window.is_row_time)
else:
window_assigner = CountTumblingWindowAssigner(self._window.window_size)
elif self._window.window_type == flink_fn_execution_pb2.GroupWindow.SLIDING_GROUP_WINDOW:
if self._is_time_window:
window_assigner = SlidingWindowAssigner(
self._window.window_size, self._window.window_slide, 0,
self._window.is_row_time)
else:
window_assigner = CountSlidingWindowAssigner(
self._window.window_size, self._window.window_slide)
else:
window_assigner = SessionWindowAssigner(
self._window.window_gap, self._window.is_row_time)
if self._is_time_window:
if self._window.is_row_time:
trigger = EventTimeTrigger()
else:
trigger = ProcessingTimeTrigger()
else:
trigger = CountTrigger(self._window.window_size)
window_aggregator = SimpleNamespaceAggsHandleFunction(
user_defined_aggs,
input_extractors,
self.index_of_count_star,
self.count_star_inserted,
self._named_property_extractor,
self.data_view_specs,
filter_args,
distinct_indexes,
distinct_view_descriptors)
return GroupWindowAggFunction(
self._window.allowedLateness,
key_selector,
self.keyed_state_backend,
state_value_coder,
window_assigner,
window_aggregator,
trigger,
self._window.time_field_index,
self._window.shift_timezone)
def process_element_or_timer(self, input_data: Tuple[int, Row, int, int, Row]):
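        # the structure of the input data:
        # [element_type, element (for process_element), timestamp (for timer),
        #  watermark (for process_element), timer data (for timer)]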
if input_data[0] == NORMAL_RECORD:
self.group_agg_function.process_watermark(input_data[3])
if fn_execution.PYFLINK_CYTHON_ENABLED:
input_row = InternalRow.from_row(input_data[1])
else:
input_row = input_data[1]
result_datas = self.group_agg_function.process_element(input_row)
for result_data in result_datas:
yield [NORMAL_RECORD, result_data, None]
timers = self.group_agg_function.get_timers()
for timer in timers:
timer_operand_type = timer[0] # type: TimerOperandType
internal_timer = timer[1] # type: InternalTimer
window = internal_timer.get_namespace()
self._reuse_key_data._values = internal_timer.get_key()
timestamp = internal_timer.get_timestamp()
encoded_window = self._namespace_coder.encode(window)
self._reuse_timer_data._values = \
[timer_operand_type.value, self._reuse_key_data, timestamp, encoded_window]
yield [TRIGGER_TIMER, None, self._reuse_timer_data]
else:
timestamp = input_data[2]
timer_data = input_data[4]
key = list(timer_data[1])
timer_type = timer_data[0]
namespace = self._namespace_coder.decode(timer_data[2])
timer = InternalTimerImpl(timestamp, key, namespace)
if timer_type == REGISTER_EVENT_TIMER:
result_datas = self.group_agg_function.on_event_time(timer)
else:
result_datas = self.group_agg_function.on_processing_time(timer)
for result_data in result_datas:
yield [NORMAL_RECORD, result_data, None]
def _create_named_property_function(self):
from pyflink.fn_execution import flink_fn_execution_pb2
named_property_extractor_array = []
for named_property in self._window.namedProperties:
if named_property == flink_fn_execution_pb2.GroupWindow.WINDOW_START:
named_property_extractor_array.append("value.start")
elif named_property == flink_fn_execution_pb2.GroupWindow.WINDOW_END:
named_property_extractor_array.append("value.end")
elif named_property == flink_fn_execution_pb2.GroupWindow.ROW_TIME_ATTRIBUTE:
named_property_extractor_array.append("value.end - 1")
elif named_property == flink_fn_execution_pb2.GroupWindow.PROC_TIME_ATTRIBUTE:
named_property_extractor_array.append("-1")
else:
raise Exception("Unexpected property %s" % named_property)
named_property_extractor_str = ','.join(named_property_extractor_array)
if named_property_extractor_str:
return eval('lambda value: [%s]' % named_property_extractor_str)
else:
return None
| 26,805 | 45.457539 | 100 |
py
|
flink
|
flink-master/flink-python/pyflink/fn_execution/table/window_aggregate_slow.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import datetime
from abc import ABC, abstractmethod
from typing import TypeVar, Generic, List, Dict
import pytz
from pyflink.common import Row, RowKind
from pyflink.common.constants import MAX_LONG_VALUE
from pyflink.fn_execution.datastream.timerservice import InternalTimer
from pyflink.fn_execution.datastream.process.timerservice_impl import LegacyInternalTimerServiceImpl
from pyflink.fn_execution.coders import PickleCoder
from pyflink.fn_execution.table.aggregate_slow import DistinctViewDescriptor, RowKeySelector
from pyflink.fn_execution.table.state_data_view import DataViewSpec, ListViewSpec, MapViewSpec, \
PerWindowStateDataViewStore
from pyflink.fn_execution.table.window_assigner import WindowAssigner, PanedWindowAssigner, \
MergingWindowAssigner
from pyflink.fn_execution.table.window_context import WindowContext, TriggerContext, K, W
from pyflink.fn_execution.table.window_process_function import GeneralWindowProcessFunction, \
InternalWindowProcessFunction, PanedWindowProcessFunction, MergingWindowProcessFunction
from pyflink.fn_execution.table.window_trigger import Trigger
from pyflink.table.udf import ImperativeAggregateFunction, FunctionContext
N = TypeVar('N')
def join_row(left: List, right: List):
return Row(*(left + right))
class NamespaceAggsHandleFunctionBase(Generic[N], ABC):
@abstractmethod
def open(self, state_data_view_store):
"""
Initialization method for the function. It is called before the actual working methods.
:param state_data_view_store: The object used to manage the DataView.
"""
pass
@abstractmethod
def accumulate(self, input_data: Row):
"""
Accumulates the input values to the accumulators.
:param input_data: Input values bundled in a Row.
"""
pass
@abstractmethod
def retract(self, input_data: Row):
"""
Retracts the input values from the accumulators.
:param input_data: Input values bundled in a Row.
"""
@abstractmethod
def merge(self, namespace: N, accumulators: List):
"""
Merges the other accumulators into current accumulators.
"""
pass
@abstractmethod
def set_accumulators(self, namespace: N, accumulators: List):
"""
        Set the current accumulators (saved in a List) which contains the current aggregated
        results.
"""
pass
@abstractmethod
def get_accumulators(self) -> List:
"""
Gets the current accumulators (saved in a list) which contains the current
aggregated results.
:return: The current accumulators.
"""
pass
@abstractmethod
def create_accumulators(self) -> List:
"""
        Initializes the accumulators and saves them to an accumulators List.
:return: A List of accumulators which contains the aggregated results.
"""
pass
@abstractmethod
def cleanup(self, namespace: N):
"""
Cleanup for the retired accumulators state.
"""
pass
@abstractmethod
def close(self):
"""
Tear-down method for this function. It can be used for clean up work.
By default, this method does nothing.
"""
pass
class NamespaceAggsHandleFunction(NamespaceAggsHandleFunctionBase[N], ABC):
@abstractmethod
def get_value(self, namespace: N) -> List:
"""
Gets the result of the aggregation from the current accumulators and namespace properties
(like window start).
        :param namespace: the namespace properties which should be calculated, such as window start
:return: the final result (saved in a List) of the current accumulators.
"""
pass
class SimpleNamespaceAggsHandleFunction(NamespaceAggsHandleFunction[N]):
def __init__(self,
udfs: List[ImperativeAggregateFunction],
input_extractors: List,
index_of_count_star: int,
count_star_inserted: bool,
named_property_extractor,
udf_data_view_specs: List[List[DataViewSpec]],
filter_args: List[int],
distinct_indexes: List[int],
distinct_view_descriptors: Dict[int, DistinctViewDescriptor]):
self._udfs = udfs
self._input_extractors = input_extractors
self._named_property_extractor = named_property_extractor
self._accumulators = None # type: List
self._udf_data_view_specs = udf_data_view_specs
self._udf_data_views = []
self._filter_args = filter_args
self._distinct_indexes = distinct_indexes
self._distinct_view_descriptors = distinct_view_descriptors
self._distinct_data_views = {}
self._get_value_indexes = [i for i in range(len(udfs))]
if index_of_count_star >= 0 and count_star_inserted:
            # The record count is used internally and should be ignored by the get_value method.
self._get_value_indexes.remove(index_of_count_star)
def open(self, state_data_view_store):
for udf in self._udfs:
udf.open(state_data_view_store.get_runtime_context())
self._udf_data_views = []
for data_view_specs in self._udf_data_view_specs:
data_views = {}
for data_view_spec in data_view_specs:
if isinstance(data_view_spec, ListViewSpec):
data_views[data_view_spec.field_index] = \
state_data_view_store.get_state_list_view(
data_view_spec.state_id,
data_view_spec.element_coder)
elif isinstance(data_view_spec, MapViewSpec):
data_views[data_view_spec.field_index] = \
state_data_view_store.get_state_map_view(
data_view_spec.state_id,
data_view_spec.key_coder,
data_view_spec.value_coder)
self._udf_data_views.append(data_views)
for key in self._distinct_view_descriptors.keys():
self._distinct_data_views[key] = state_data_view_store.get_state_map_view(
"agg%ddistinct" % key,
PickleCoder(),
PickleCoder())
def accumulate(self, input_data: Row):
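        # For aggregate calls with DISTINCT, a reference count per distinct value is kept
        # in the shared distinct data view and the underlying aggregate is only accumulated
        # the first time a value is seen.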
for i in range(len(self._udfs)):
if i in self._distinct_data_views:
if len(self._distinct_view_descriptors[i].get_filter_args()) == 0:
filtered = False
else:
filtered = True
for filter_arg in self._distinct_view_descriptors[i].get_filter_args():
if input_data[filter_arg]:
filtered = False
break
if not filtered:
input_extractor = self._distinct_view_descriptors[i].get_input_extractor()
args = input_extractor(input_data)
if args in self._distinct_data_views[i]:
self._distinct_data_views[i][args] += 1
else:
self._distinct_data_views[i][args] = 1
if self._filter_args[i] >= 0 and not input_data[self._filter_args[i]]:
continue
input_extractor = self._input_extractors[i]
args = input_extractor(input_data)
if self._distinct_indexes[i] >= 0:
if args in self._distinct_data_views[self._distinct_indexes[i]]:
if self._distinct_data_views[self._distinct_indexes[i]][args] > 1:
continue
else:
raise Exception(
"The args are not in the distinct data view, this should not happen.")
self._udfs[i].accumulate(self._accumulators[i], *args)
def retract(self, input_data: Row):
for i in range(len(self._udfs)):
if i in self._distinct_data_views:
if len(self._distinct_view_descriptors[i].get_filter_args()) == 0:
filtered = False
else:
filtered = True
for filter_arg in self._distinct_view_descriptors[i].get_filter_args():
if input_data[filter_arg]:
filtered = False
break
if not filtered:
input_extractor = self._distinct_view_descriptors[i].get_input_extractor()
args = input_extractor(input_data)
if args in self._distinct_data_views[i]:
self._distinct_data_views[i][args] -= 1
if self._distinct_data_views[i][args] == 0:
del self._distinct_data_views[i][args]
if self._filter_args[i] >= 0 and not input_data[self._filter_args[i]]:
continue
input_extractor = self._input_extractors[i]
args = input_extractor(input_data)
if self._distinct_indexes[i] >= 0 and \
args in self._distinct_data_views[self._distinct_indexes[i]]:
continue
self._udfs[i].retract(self._accumulators[i], *args)
def merge(self, namespace: N, accumulators: List):
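        # Replace the data view fields of the other accumulators with views bound to the
        # given namespace so that the UDF's merge reads ListView / MapView state from the
        # window being merged.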
if self._udf_data_views:
for i in range(len(self._udf_data_views)):
for index, data_view in self._udf_data_views[i].items():
data_view.set_current_namespace(namespace)
accumulators[i][index] = data_view
for i in range(len(self._udfs)):
self._udfs[i].merge(self._accumulators[i], [accumulators[i]])
def set_accumulators(self, namespace: N, accumulators: List):
if self._udf_data_views and namespace is not None:
for i in range(len(self._udf_data_views)):
for index, data_view in self._udf_data_views[i].items():
data_view.set_current_namespace(namespace)
accumulators[i][index] = data_view
self._accumulators = accumulators
def get_accumulators(self) -> List:
return self._accumulators
def create_accumulators(self) -> List:
return [udf.create_accumulator() for udf in self._udfs]
def cleanup(self, namespace: N):
for i in range(len(self._udf_data_views)):
for data_view in self._udf_data_views[i].values():
data_view.set_current_namespace(namespace)
data_view.clear()
def close(self):
for udf in self._udfs:
udf.close()
def get_value(self, namespace: N) -> List:
result = [self._udfs[i].get_value(self._accumulators[i]) for i in self._get_value_indexes]
if self._named_property_extractor:
result.extend(self._named_property_extractor(namespace))
return result
class GroupWindowAggFunctionBase(Generic[K, W]):
def __init__(self,
allowed_lateness: int,
key_selector: RowKeySelector,
state_backend,
state_value_coder,
window_assigner: WindowAssigner[W],
window_aggregator: NamespaceAggsHandleFunctionBase[W],
trigger: Trigger[W],
rowtime_index: int,
shift_timezone: str):
self._allowed_lateness = allowed_lateness
self._key_selector = key_selector
self._state_backend = state_backend
self._state_value_coder = state_value_coder
self._window_assigner = window_assigner
self._window_aggregator = window_aggregator
self._rowtime_index = rowtime_index
self._shift_timezone = shift_timezone
self._window_function = None # type: InternalWindowProcessFunction[K, W]
self._internal_timer_service = None # type: LegacyInternalTimerServiceImpl
self._window_context = None # type: WindowContext
self._trigger = trigger
self._trigger_context = None # type: TriggerContext
self._window_state = self._state_backend.get_value_state("window_state", state_value_coder)
def open(self, function_context: FunctionContext):
self._internal_timer_service = LegacyInternalTimerServiceImpl(self._state_backend)
self._window_aggregator.open(
PerWindowStateDataViewStore(function_context, self._state_backend))
if isinstance(self._window_assigner, PanedWindowAssigner):
self._window_function = PanedWindowProcessFunction(
self._allowed_lateness, self._window_assigner, self._window_aggregator)
elif isinstance(self._window_assigner, MergingWindowAssigner):
self._window_function = MergingWindowProcessFunction(
self._allowed_lateness, self._window_assigner, self._window_aggregator,
self._state_backend)
else:
self._window_function = GeneralWindowProcessFunction(
self._allowed_lateness, self._window_assigner, self._window_aggregator)
self._trigger_context = TriggerContext(
self._trigger, self._internal_timer_service, self._state_backend)
self._trigger_context.open()
self._window_context = WindowContext(
self, self._trigger_context, self._state_backend, self._state_value_coder,
self._internal_timer_service, self._window_assigner.is_event_time())
self._window_function.open(self._window_context)
def process_element(self, input_row: Row):
input_value = input_row._values
current_key = self._key_selector.get_key(input_value)
self._state_backend.set_current_key(current_key)
if self._window_assigner.is_event_time():
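            # the rowtime field holds a datetime, convert it to epoch milliseconds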
timestamp = input_value[self._rowtime_index]
seconds = int(timestamp.replace(tzinfo=datetime.timezone.utc).timestamp())
microseconds_of_second = timestamp.microsecond
milliseconds = seconds * 1000 + microseconds_of_second // 1000
timestamp = milliseconds
else:
timestamp = self._internal_timer_service.current_processing_time()
timestamp = self.to_utc_timestamp_mills(timestamp)
# the windows which the input row should be placed into
affected_windows = self._window_function.assign_state_namespace(input_value, timestamp)
for window in affected_windows:
self._window_state.set_current_namespace(window)
acc = self._window_state.value() # type: List
if acc is None:
acc = self._window_aggregator.create_accumulators()
self._window_aggregator.set_accumulators(window, acc)
if input_row._is_accumulate_msg():
self._window_aggregator.accumulate(input_row)
else:
self._window_aggregator.retract(input_row)
acc = self._window_aggregator.get_accumulators()
self._window_state.update(acc)
        # the actual windows which the input row belongs to
actual_windows = self._window_function.assign_actual_windows(input_value, timestamp)
result = []
for window in actual_windows:
self._trigger_context.window = window
trigger_result = self._trigger_context.on_element(input_row, timestamp)
if trigger_result:
result.append(self._emit_window_result(current_key, window))
self._register_cleanup_timer(window)
return result
def process_watermark(self, watermark: int):
self._internal_timer_service.advance_watermark(watermark)
def on_event_time(self, timer: InternalTimer):
result = []
timestamp = timer.get_timestamp()
key = timer.get_key()
self._state_backend.set_current_key(key)
window = timer.get_namespace()
self._trigger_context.window = window
if self._trigger_context.on_event_time(timestamp):
# fire
result.append(self._emit_window_result(key, window))
if self._window_assigner.is_event_time():
self._window_function.clean_window_if_needed(window, timestamp)
return result
def on_processing_time(self, timer: InternalTimer):
result = []
timestamp = timer.get_timestamp()
key = timer.get_key()
self._state_backend.set_current_key(key)
window = timer.get_namespace()
self._trigger_context.window = window
if self._trigger_context.on_processing_time(timestamp):
# fire
result.append(self._emit_window_result(key, window))
if not self._window_assigner.is_event_time():
self._window_function.clean_window_if_needed(window, timestamp)
return result
def get_timers(self):
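        # drain the timers registered while processing elements, they are forwarded
        # downstream as TRIGGER_TIMER records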
yield from self._internal_timer_service._timers.keys()
self._internal_timer_service._timers.clear()
def to_utc_timestamp_mills(self, epoch_mills):
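        # Shift the epoch milliseconds by the offset of the configured shift timezone,
        # e.g. with shift_timezone "Asia/Shanghai" (UTC+8) an input of 0 becomes 28800000.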
if self._shift_timezone == "UTC":
return epoch_mills
else:
timezone = pytz.timezone(self._shift_timezone)
local_date_time = datetime.datetime.fromtimestamp(epoch_mills / 1000., timezone)\
.replace(tzinfo=None)
epoch = datetime.datetime.utcfromtimestamp(0)
return int((local_date_time - epoch).total_seconds() * 1000.0)
def close(self):
self._window_aggregator.close()
def _register_cleanup_timer(self, window: N):
cleanup_time = self.cleanup_time(window)
if cleanup_time == MAX_LONG_VALUE:
return
if self._window_assigner.is_event_time():
self._trigger_context.register_event_time_timer(cleanup_time)
else:
self._trigger_context.register_processing_time_timer(cleanup_time)
def cleanup_time(self, window: N) -> int:
if self._window_assigner.is_event_time():
cleanup_time = max(0, window.max_timestamp() + self._allowed_lateness)
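            # if the addition overflowed, return MAX_LONG_VALUE so that no cleanup timer
            # is registered for this window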
if cleanup_time >= window.max_timestamp():
return cleanup_time
else:
return MAX_LONG_VALUE
else:
return max(0, window.max_timestamp())
@abstractmethod
def _emit_window_result(self, key: List, window: W):
pass
class GroupWindowAggFunction(GroupWindowAggFunctionBase[K, W]):
def __init__(self,
allowed_lateness: int,
key_selector: RowKeySelector,
state_backend,
state_value_coder,
window_assigner: WindowAssigner[W],
window_aggregator: NamespaceAggsHandleFunction[W],
trigger: Trigger[W],
rowtime_index: int,
shift_timezone: str):
super(GroupWindowAggFunction, self).__init__(
allowed_lateness, key_selector, state_backend, state_value_coder, window_assigner,
window_aggregator, trigger, rowtime_index, shift_timezone)
self._window_aggregator = window_aggregator
def _emit_window_result(self, key: List, window: W):
self._window_function.prepare_aggregate_accumulator_for_emit(window)
agg_result = self._window_aggregator.get_value(window)
result_row = join_row(key, agg_result)
# send INSERT
result_row.set_row_kind(RowKind.INSERT)
return result_row
| 20,522 | 42.115546 | 100 |
py
|
flink
|
flink-master/flink-python/pyflink/fn_execution/metrics/__init__.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
| 958 | 52.277778 | 80 |
py
|
flink
|
flink-master/flink-python/pyflink/fn_execution/metrics/process/counter_impl.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pyflink.metrics import Counter
class CounterImpl(Counter):
def __init__(self, inner_counter):
self._inner_counter = inner_counter
def inc(self, n: int = 1):
"""
Increment the current count by the given value.
.. versionadded:: 1.11.0
"""
self._inner_counter.inc(n)
def dec(self, n: int = 1):
"""
        Decrement the current count by the given value.
.. versionadded:: 1.11.0
"""
self.inc(-n)
def get_count(self) -> int:
"""
Returns the current count.
.. versionadded:: 1.11.0
"""
from apache_beam.metrics.execution import MetricsEnvironment
container = MetricsEnvironment.current_container()
return container.get_counter(self._inner_counter.metric_name).get_cumulative()
| 1,782 | 34.66 | 86 |
py
|
flink
|
flink-master/flink-python/pyflink/fn_execution/metrics/process/distribution_impl.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pyflink.metrics import Distribution
class DistributionImpl(Distribution):
def __init__(self, inner_distribution):
self._inner_distribution = inner_distribution
def update(self, value):
"""
Updates the distribution value.
.. versionadded:: 1.11.0
"""
self._inner_distribution.update(value)
| 1,312 | 40.03125 | 80 |
py
|
flink
|
flink-master/flink-python/pyflink/fn_execution/metrics/process/meter_impl.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pyflink.metrics import Meter
class MeterImpl(Meter):
def __init__(self, inner_counter):
self._inner_counter = inner_counter
def mark_event(self, value: int = 1):
"""
Mark occurrence of the specified number of events.
.. versionadded:: 1.11.0
"""
self._inner_counter.inc(value)
def get_count(self) -> int:
"""
Get number of events marked on the meter.
.. versionadded:: 1.11.0
"""
from apache_beam.metrics.execution import MetricsEnvironment
container = MetricsEnvironment.current_container()
return container.get_counter(self._inner_counter.metric_name).get_cumulative()
| 1,657 | 37.55814 | 86 |
py
|
flink
|
flink-master/flink-python/pyflink/fn_execution/metrics/process/metric_impl.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import json
from enum import Enum
from typing import Callable, List, Tuple
from pyflink.fn_execution.metrics.process.counter_impl import CounterImpl
from pyflink.fn_execution.metrics.process.distribution_impl import DistributionImpl
from pyflink.fn_execution.metrics.process.meter_impl import MeterImpl
from pyflink.metrics import MetricGroup, Counter, Distribution, Meter
class MetricGroupType(Enum):
"""
Indicate the type of MetricGroup.
"""
generic = 0
key = 1
value = 2
class GenericMetricGroup(MetricGroup):
def __init__(
self,
parent,
name,
metric_group_type=MetricGroupType.generic):
self._parent = parent
self._sub_groups = []
self._name = name
self._metric_group_type = metric_group_type
self._flink_gauge = {}
self._beam_gauge = {}
def add_group(self, name: str, extra: str = None) -> 'MetricGroup':
if extra is None:
return self._add_group(name, MetricGroupType.generic)
else:
return self._add_group(name, MetricGroupType.key) \
._add_group(extra, MetricGroupType.value)
def counter(self, name: str) -> 'Counter':
from apache_beam.metrics.metric import Metrics
return CounterImpl(Metrics.counter(self._get_namespace(), name))
def gauge(self, name: str, obj: Callable[[], int]) -> None:
from apache_beam.metrics.metric import Metrics
self._flink_gauge[name] = obj
self._beam_gauge[name] = Metrics.gauge(self._get_namespace(), name)
def meter(self, name: str, time_span_in_seconds: int = 60) -> 'Meter':
from apache_beam.metrics.metric import Metrics
        # There is no meter type in Beam, so a counter is used to implement the meter
return MeterImpl(Metrics.counter(self._get_namespace(time_span_in_seconds), name))
def distribution(self, name: str) -> 'Distribution':
from apache_beam.metrics.metric import Metrics
return DistributionImpl(Metrics.distribution(self._get_namespace(), name))
def _add_group(self, name: str, metric_group_type: MetricGroupType) -> 'GenericMetricGroup':
for group in self._sub_groups:
if name == group._name and metric_group_type == group._metric_group_type:
                # we don't create the same metric group repeatedly
return group
sub_group = GenericMetricGroup(
self,
name,
metric_group_type)
self._sub_groups.append(sub_group)
return sub_group
def _get_metric_group_names_and_types(self) -> Tuple[List[str], List[str]]:
if self._name is None:
return [], []
else:
names, types = self._parent._get_metric_group_names_and_types()
names.append(self._name)
types.append(str(self._metric_group_type))
return names, types
def _get_namespace(self, time=None) -> str:
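        # The namespace is the JSON encoded list of group names followed by the group types
        # and the optional meter time span, e.g. '["my_group", "MetricGroupType.generic", "60"]'.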
names, metric_group_type = self._get_metric_group_names_and_types()
names.extend(metric_group_type)
if time is not None:
names.append(str(time))
return json.dumps(names)
| 4,138 | 38.798077 | 96 |
py
|
flink
|
flink-master/flink-python/pyflink/fn_execution/metrics/process/__init__.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
| 958 | 52.277778 | 80 |
py
|
flink
|
flink-master/flink-python/pyflink/fn_execution/metrics/tests/__init__.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
| 958 | 52.277778 | 80 |
py
|
flink
|
flink-master/flink-python/pyflink/fn_execution/metrics/tests/test_metric.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from apache_beam.metrics.cells import DistributionData
from apache_beam.metrics.execution import MetricsContainer
from apache_beam.metrics.execution import MetricsEnvironment
from apache_beam.metrics.metricbase import MetricName
from apache_beam.runners.worker import statesampler
from apache_beam.utils import counters
from pyflink.fn_execution.metrics.process.metric_impl import GenericMetricGroup
from pyflink.metrics.metricbase import MetricGroup
from pyflink.table import FunctionContext
from pyflink.testing.test_case_utils import PyFlinkTestCase
class MetricTests(PyFlinkTestCase):
base_metric_group = GenericMetricGroup(None, None)
@staticmethod
def print_metric_group_path(mg: MetricGroup) -> str:
if mg._parent is None:
return 'root'
else:
return MetricTests.print_metric_group_path(mg._parent) + '.' + mg._name
def test_add_group(self):
new_group = MetricTests.base_metric_group.add_group('my_group')
self.assertEqual(MetricTests.print_metric_group_path(new_group), 'root.my_group')
def test_add_group_with_variable(self):
new_group = MetricTests.base_metric_group.add_group('key', 'value')
self.assertEqual(MetricTests.print_metric_group_path(new_group), 'root.key.value')
def test_metric_not_enabled(self):
fc = FunctionContext(None, None)
with self.assertRaises(RuntimeError):
fc.get_metric_group()
def test_get_metric_name(self):
new_group = MetricTests.base_metric_group.add_group('my_group')
self.assertEqual(
'["my_group", "MetricGroupType.generic"]',
new_group._get_namespace())
self.assertEqual(
'["my_group", "MetricGroupType.generic", "60"]',
new_group._get_namespace('60'))
def test_metrics(self):
sampler = statesampler.StateSampler('', counters.CounterFactory())
statesampler.set_current_tracker(sampler)
state1 = sampler.scoped_state(
'mystep', 'myState', metrics_container=MetricsContainer('mystep'))
try:
sampler.start()
with state1:
counter = MetricTests.base_metric_group.counter("my_counter")
meter = MetricTests.base_metric_group.meter("my_meter")
distribution = MetricTests.base_metric_group.distribution("my_distribution")
container = MetricsEnvironment.current_container()
self.assertEqual(0, counter.get_count())
self.assertEqual(0, meter.get_count())
self.assertEqual(
DistributionData(
0, 0, 0, 0), container.get_distribution(
MetricName(
'[]', 'my_distribution')).get_cumulative())
counter.inc(-2)
meter.mark_event(3)
distribution.update(10)
distribution.update(2)
self.assertEqual(-2, counter.get_count())
self.assertEqual(3, meter.get_count())
self.assertEqual(
DistributionData(
12, 2, 2, 10), container.get_distribution(
MetricName(
'[]', 'my_distribution')).get_cumulative())
finally:
sampler.stop()
| 4,322 | 43.112245 | 92 |
py
|
flink
|
flink-master/flink-python/pyflink/fn_execution/metrics/embedded/counter_impl.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pyflink.metrics import Counter
class CounterImpl(Counter):
def __init__(self, inner_counter):
self._inner_counter = inner_counter
def inc(self, n: int = 1):
"""
Increment the current count by the given value.
"""
self._inner_counter.inc(n)
def dec(self, n: int = 1):
"""
        Decrement the current count by the given value.
"""
self.inc(-n)
def get_count(self) -> int:
"""
Returns the current count.
"""
return self._inner_counter.getCount()
| 1,511 | 35 | 80 |
py
|
flink
|
flink-master/flink-python/pyflink/fn_execution/metrics/embedded/distribution_impl.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pyflink.metrics import Distribution
class DistributionImpl(Distribution):
def __init__(self, inner_distribution):
self._inner_distribution = inner_distribution
def update(self, value):
"""
Updates the distribution value.
"""
self._inner_distribution.update(value)
| 1,278 | 41.633333 | 80 |
py
|
flink
|
flink-master/flink-python/pyflink/fn_execution/metrics/embedded/meter_impl.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pyflink.metrics import Meter
class MeterImpl(Meter):
def __init__(self, inner_counter):
self._inner_counter = inner_counter
def mark_event(self, value: int = 1):
"""
Mark occurrence of the specified number of events.
"""
self._inner_counter.markEvent(value)
def get_count(self) -> int:
"""
Get number of events marked on the meter.
"""
return self._inner_counter.getCount()
| 1,426 | 37.567568 | 80 |
py
|
flink
|
flink-master/flink-python/pyflink/fn_execution/metrics/embedded/metric_impl.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from typing import Callable
from pemja import findClass
from pyflink.fn_execution.metrics.embedded.counter_impl import CounterImpl
from pyflink.fn_execution.metrics.embedded.distribution_impl import DistributionImpl
from pyflink.fn_execution.metrics.embedded.meter_impl import MeterImpl
from pyflink.metrics import MetricGroup, Counter, Distribution, Meter
JMeterView = findClass('org.apache.flink.metrics.MeterView')
JMetricGauge = findClass('org.apache.flink.python.metric.embedded.MetricGauge')
JMetricDistribution = findClass('org.apache.flink.python.metric.embedded.MetricDistribution')
class MetricGroupImpl(MetricGroup):
def __init__(self, metrics):
self._metrics = metrics
def add_group(self, name: str, extra: str = None) -> 'MetricGroup':
if extra is None:
return MetricGroupImpl(self._metrics.addGroup(name))
else:
return MetricGroupImpl(self._metrics.addGroup(name, extra))
def counter(self, name: str) -> 'Counter':
return CounterImpl(self._metrics.counter(name))
def gauge(self, name: str, obj: Callable[[], int]) -> None:
self._metrics.gauge(name, JMetricGauge(PythonGaugeCallable(obj)))
def meter(self, name: str, time_span_in_seconds: int = 60) -> 'Meter':
return MeterImpl(self._metrics.meter(name, JMeterView(time_span_in_seconds)))
def distribution(self, name: str) -> 'Distribution':
return DistributionImpl(self._metrics.gauge(name, JMetricDistribution()))
class PythonGaugeCallable(object):
def __init__(self, func: Callable):
self.func = func
def get_value(self):
return self.func()
| 2,607 | 41.064516 | 93 |
py
|
flink
|
flink-master/flink-python/pyflink/fn_execution/metrics/embedded/__init__.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
| 958 | 52.277778 | 80 |
py
|
flink
|
flink-master/flink-python/pyflink/fn_execution/tests/test_coders.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
"""Tests common to all coder implementations."""
import decimal
import logging
import unittest
from pyflink.fn_execution.coders import BigIntCoder, TinyIntCoder, BooleanCoder, \
SmallIntCoder, IntCoder, FloatCoder, DoubleCoder, BinaryCoder, CharCoder, DateCoder, \
TimeCoder, TimestampCoder, GenericArrayCoder, MapCoder, DecimalCoder, FlattenRowCoder,\
RowCoder, LocalZonedTimestampCoder, BigDecimalCoder, TupleCoder, PrimitiveArrayCoder,\
TimeWindowCoder, CountWindowCoder, InstantCoder
from pyflink.datastream.window import TimeWindow, CountWindow
from pyflink.testing.test_case_utils import PyFlinkTestCase
class CodersTest(PyFlinkTestCase):
def check_coder(self, coder, *values):
coder_impl = coder.get_impl()
for v in values:
if isinstance(v, float):
from pyflink.table.tests.test_udf import float_equal
assert float_equal(v, coder_impl.decode(coder_impl.encode(v)), 1e-6)
else:
self.assertEqual(v, coder_impl.decode(coder_impl.encode(v)))
# decide whether two floats are equal
@staticmethod
def float_equal(a, b, rel_tol=1e-09, abs_tol=0.0):
return abs(a - b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
def test_bigint_coder(self):
coder = BigIntCoder()
self.check_coder(coder, 1, 100, -100, -1000)
def test_tinyint_coder(self):
coder = TinyIntCoder()
self.check_coder(coder, 1, 10, 127, -128)
def test_boolean_coder(self):
coder = BooleanCoder()
self.check_coder(coder, True, False)
def test_smallint_coder(self):
coder = SmallIntCoder()
self.check_coder(coder, 32767, -32768, 0)
def test_int_coder(self):
coder = IntCoder()
self.check_coder(coder, -2147483648, 2147483647)
def test_float_coder(self):
coder = FloatCoder()
self.check_coder(coder, 1.02, 1.32)
def test_double_coder(self):
coder = DoubleCoder()
self.check_coder(coder, -12.02, 1.98932)
def test_binary_coder(self):
coder = BinaryCoder()
self.check_coder(coder, b'pyflink')
def test_char_coder(self):
coder = CharCoder()
self.check_coder(coder, 'flink', '🐿')
def test_date_coder(self):
import datetime
coder = DateCoder()
self.check_coder(coder, datetime.date(2019, 9, 10))
def test_time_coder(self):
import datetime
coder = TimeCoder()
self.check_coder(coder, datetime.time(hour=11, minute=11, second=11, microsecond=123000))
def test_timestamp_coder(self):
import datetime
coder = TimestampCoder(3)
self.check_coder(coder, datetime.datetime(2019, 9, 10, 18, 30, 20, 123000))
coder = TimestampCoder(6)
self.check_coder(coder, datetime.datetime(2019, 9, 10, 18, 30, 20, 123456))
def test_local_zoned_timestamp_coder(self):
import datetime
import pytz
timezone = pytz.timezone("Asia/Shanghai")
coder = LocalZonedTimestampCoder(3, timezone)
self.check_coder(coder,
timezone.localize(datetime.datetime(2019, 9, 10, 18, 30, 20, 123000)))
coder = LocalZonedTimestampCoder(6, timezone)
self.check_coder(coder,
timezone.localize(datetime.datetime(2019, 9, 10, 18, 30, 20, 123456)))
def test_instant_coder(self):
from pyflink.common.time import Instant
coder = InstantCoder()
self.check_coder(coder, Instant(100, 2000), None, Instant(-9223372036854775808, 0))
def test_array_coder(self):
element_coder = BigIntCoder()
coder = GenericArrayCoder(element_coder)
self.check_coder(coder, [1, 2, 3, None])
def test_primitive_array_coder(self):
element_coder = CharCoder()
coder = PrimitiveArrayCoder(element_coder)
self.check_coder(coder, ['hi', 'hello', 'flink'])
def test_map_coder(self):
key_coder = CharCoder()
value_coder = BigIntCoder()
coder = MapCoder(key_coder, value_coder)
self.check_coder(coder, {'flink': 1, 'pyflink': 2, 'coder': None})
def test_decimal_coder(self):
import decimal
coder = DecimalCoder(38, 18)
self.check_coder(coder, decimal.Decimal('0.00001'), decimal.Decimal('1.23E-8'))
coder = DecimalCoder(4, 3)
decimal.getcontext().prec = 2
self.check_coder(coder, decimal.Decimal('1.001'))
self.assertEqual(decimal.getcontext().prec, 2)
def test_flatten_row_coder(self):
field_coder = BigIntCoder()
field_count = 10
coder = FlattenRowCoder([field_coder for _ in range(field_count)]).get_impl()
v = [None if i % 2 == 0 else i for i in range(field_count)]
generator_result = coder.decode(coder.encode(v))
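        # FlattenRowCoder decodes to a generator of field values, so materialize it before
        # comparing with the original list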
result = []
for item in generator_result:
result.append(item)
self.assertEqual(v, result)
def test_row_coder(self):
from pyflink.common import Row, RowKind
field_coder = BigIntCoder()
field_count = 10
field_names = ['f{}'.format(i) for i in range(field_count)]
coder = RowCoder([field_coder for _ in range(field_count)], field_names)
v = Row(**{field_names[i]: None if i % 2 == 0 else i for i in range(field_count)})
v.set_row_kind(RowKind.INSERT)
self.check_coder(coder, v)
v.set_row_kind(RowKind.UPDATE_BEFORE)
self.check_coder(coder, v)
v.set_row_kind(RowKind.UPDATE_AFTER)
self.check_coder(coder, v)
v.set_row_kind(RowKind.DELETE)
self.check_coder(coder, v)
coder = RowCoder([BigIntCoder(), CharCoder()], ['f1', 'f0'])
v = Row(f0="flink", f1=11)
self.check_coder(coder, v)
def test_basic_decimal_coder(self):
basic_dec_coder = BigDecimalCoder()
value = decimal.Decimal(1.200)
self.check_coder(basic_dec_coder, value)
def test_tuple_coder(self):
field_coders = [IntCoder(), CharCoder(), CharCoder()]
tuple_coder = TupleCoder(field_coders=field_coders)
data = (1, "Hello", "Hi")
self.check_coder(tuple_coder, data)
def test_window_coder(self):
coder = TimeWindowCoder()
self.check_coder(coder, TimeWindow(100, 1000))
coder = CountWindowCoder()
self.check_coder(coder, CountWindow(100))
def test_coder_with_unmatched_type(self):
from pyflink.common import Row
coder = FlattenRowCoder([BigIntCoder()])
with self.assertRaises(TypeError, msg='Expected list, got Row'):
self.check_coder(coder, Row(1))
if __name__ == '__main__':
logging.getLogger().setLevel(logging.INFO)
unittest.main()
| 7,729 | 37.267327 | 97 |
py
|
flink
|
flink-master/flink-python/pyflink/fn_execution/tests/test_process_mode_boot.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import os
import shutil
import socket
import subprocess
import sys
import tempfile
import time
import unittest
import grpc
from concurrent import futures
from apache_beam.portability.api.org.apache.beam.model.fn_execution.v1.beam_provision_api_pb2 \
import ProvisionInfo, GetProvisionInfoResponse
from apache_beam.portability.api.org.apache.beam.model.fn_execution.v1.beam_provision_api_pb2_grpc \
import ProvisionServiceServicer, add_ProvisionServiceServicer_to_server
from google.protobuf import json_format
from pyflink.java_gateway import get_gateway
from pyflink.pyflink_gateway_server import on_windows
from pyflink.testing.test_case_utils import PyFlinkTestCase
class PythonBootTests(PyFlinkTestCase):
def setUp(self):
provision_info = json_format.Parse('{"retrievalToken": "test_token"}', ProvisionInfo())
response = GetProvisionInfoResponse(info=provision_info)
def get_unused_port():
sock = socket.socket()
sock.bind(('', 0))
port = sock.getsockname()[1]
sock.close()
return port
class ProvisionService(ProvisionServiceServicer):
def GetProvisionInfo(self, request, context):
return response
def start_test_provision_server():
server = grpc.server(futures.ThreadPoolExecutor(max_workers=1))
add_ProvisionServiceServicer_to_server(ProvisionService(), server)
port = get_unused_port()
server.add_insecure_port('[::]:' + str(port))
server.start()
return server, port
self.provision_server, self.provision_port = start_test_provision_server()
self.env = dict(os.environ)
self.env["python"] = sys.executable
self.env["FLINK_BOOT_TESTING"] = "1"
self.env["BOOT_LOG_DIR"] = os.path.join(self.env["FLINK_HOME"], "log")
self.tmp_dir = tempfile.mkdtemp(str(time.time()), dir=self.tempdir)
        # assume that this file is in the flink-python source code directory.
pyflink_package_dir = os.path.dirname(
os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
runner_script = "pyflink-udf-runner.bat" if on_windows() else \
"pyflink-udf-runner.sh"
self.runner_path = os.path.join(
pyflink_package_dir, "bin", runner_script)
def run_boot_py(self):
args = [self.runner_path, "--id", "1",
"--logging_endpoint", "localhost:0000",
"--artifact_endpoint", "whatever",
"--provision_endpoint", "localhost:%d" % self.provision_port,
"--control_endpoint", "localhost:0000",
"--semi_persist_dir", self.tmp_dir]
return subprocess.call(args, env=self.env)
def test_python_boot(self):
exit_code = self.run_boot_py()
self.assertTrue(exit_code == 0, "the boot.py exited with non-zero code.")
    @unittest.skipIf(on_windows(), "'subprocess.check_output' on Windows always returns an "
                                   "empty string, skip this test.")
def test_param_validation(self):
args = [self.runner_path]
exit_message = subprocess.check_output(args, env=self.env).decode("utf-8")
self.assertIn("No id provided.", exit_message)
args = [self.runner_path, "--id", "1"]
exit_message = subprocess.check_output(args, env=self.env).decode("utf-8")
self.assertIn("No provision endpoint provided.", exit_message)
def test_set_working_directory(self):
JProcessPythonEnvironmentManager = \
get_gateway().jvm.org.apache.flink.python.env.process.ProcessPythonEnvironmentManager
output_file = os.path.join(self.tmp_dir, "output.txt")
pyflink_dir = os.path.join(self.tmp_dir, "pyflink")
os.mkdir(pyflink_dir)
# just create an empty file
open(os.path.join(pyflink_dir, "__init__.py"), 'a').close()
fn_execution_dir = os.path.join(pyflink_dir, "fn_execution")
os.mkdir(fn_execution_dir)
open(os.path.join(fn_execution_dir, "__init__.py"), 'a').close()
beam_dir = os.path.join(fn_execution_dir, "beam")
os.mkdir(beam_dir)
open(os.path.join(beam_dir, "__init__.py"), 'a').close()
with open(os.path.join(beam_dir, "beam_boot.py"), "w") as f:
f.write("import os\nwith open(r'%s', 'w') as f:\n f.write(os.getcwd())" %
output_file)
        # test whether the name of the working directory variable of the udf runner is
        # consistent with ProcessPythonEnvironmentManager.
self.env[JProcessPythonEnvironmentManager.PYTHON_WORKING_DIR] = self.tmp_dir
self.env["python"] = sys.executable
args = [self.runner_path]
subprocess.check_output(args, env=self.env)
process_cwd = None
if os.path.exists(output_file):
with open(output_file, 'r') as f:
process_cwd = f.read()
        self.assertEqual(os.path.realpath(self.tmp_dir),
                         process_cwd,
                         "setting the working directory variable does not work!")
def tearDown(self):
self.provision_server.stop(0)
try:
if self.tmp_dir is not None:
shutil.rmtree(self.tmp_dir)
        except Exception:
pass
| 6,285 | 41.761905 | 100 |
py
|
flink
|
flink-master/flink-python/pyflink/fn_execution/tests/__init__.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
| 958 | 52.277778 | 80 |
py
|
flink
|
flink-master/flink-python/pyflink/fn_execution/tests/test_flink_fn_execution_pb2.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import filecmp
import os
from pyflink.gen_protos import generate_proto_files
from pyflink.testing.test_case_utils import PyFlinkTestCase
class FlinkFnExecutionTests(PyFlinkTestCase):
"""
    Tests whether flink_fn_execution_pb2.py is synced with flink-fn-execution.proto.
"""
flink_fn_execution_pb2_file_name = "flink_fn_execution_pb2.py"
gen_protos_script = "gen_protos.py"
flink_fn_execution_proto_file_name = "flink-fn-execution.proto"
def test_flink_fn_execution_pb2_synced(self):
generate_proto_files('True', self.tempdir)
expected = os.path.join(self.tempdir, self.flink_fn_execution_pb2_file_name)
actual = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..',
self.flink_fn_execution_pb2_file_name)
self.assertTrue(filecmp.cmp(expected, actual),
'File %s should be re-generated by executing %s as %s has changed.'
% (self.flink_fn_execution_pb2_file_name,
self.gen_protos_script,
self.flink_fn_execution_proto_file_name))
def test_state_ttl_config_proto(self):
from pyflink.datastream.state import StateTtlConfig
from pyflink.common.time import Time
state_ttl_config = StateTtlConfig \
.new_builder(Time.milliseconds(1000)) \
.set_update_type(StateTtlConfig.UpdateType.OnCreateAndWrite) \
.set_state_visibility(StateTtlConfig.StateVisibility.NeverReturnExpired) \
.cleanup_full_snapshot() \
.cleanup_incrementally(10, True) \
.cleanup_in_rocksdb_compact_filter(1000) \
.build()
state_ttl_config_proto = state_ttl_config._to_proto()
state_ttl_config = StateTtlConfig._from_proto(state_ttl_config_proto)
self.assertEqual(state_ttl_config.get_ttl(), Time.milliseconds(1000))
self.assertEqual(
state_ttl_config.get_update_type(), StateTtlConfig.UpdateType.OnCreateAndWrite)
self.assertEqual(
state_ttl_config.get_state_visibility(),
StateTtlConfig.StateVisibility.NeverReturnExpired)
self.assertEqual(
state_ttl_config.get_ttl_time_characteristic(),
StateTtlConfig.TtlTimeCharacteristic.ProcessingTime)
cleanup_strategies = state_ttl_config.get_cleanup_strategies()
self.assertTrue(cleanup_strategies.is_cleanup_in_background())
self.assertTrue(cleanup_strategies.in_full_snapshot())
incremental_cleanup_strategy = cleanup_strategies.get_incremental_cleanup_strategy()
self.assertEqual(incremental_cleanup_strategy.get_cleanup_size(), 10)
self.assertTrue(incremental_cleanup_strategy.run_cleanup_for_every_record())
rocksdb_compact_filter_cleanup_strategy = \
cleanup_strategies.get_rocksdb_compact_filter_cleanup_strategy()
self.assertEqual(
rocksdb_compact_filter_cleanup_strategy.get_query_time_after_num_entries(), 1000)
| 4,000 | 49.0125 | 93 |
py
|
flink
|
flink-master/flink-python/pyflink/fn_execution/embedded/state_impl.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pemja import findClass
from pyflink.datastream.state import (ValueStateDescriptor, ListStateDescriptor, MapStateDescriptor,
StateDescriptor, ReducingStateDescriptor,
AggregatingStateDescriptor)
from pyflink.fn_execution.datastream.embedded.state_impl import (ValueStateImpl, ListStateImpl,
MapStateImpl, ReducingStateImpl,
AggregatingStateImpl)
from pyflink.fn_execution.embedded.converters import from_type_info
from pyflink.fn_execution.embedded.java_utils import to_java_state_descriptor
JVoidNamespace = findClass('org.apache.flink.runtime.state.VoidNamespace')
JVoidNamespaceSerializer = findClass('org.apache.flink.runtime.state.VoidNamespaceSerializer')
JVoidNamespace_INSTANCE = JVoidNamespace.INSTANCE
JVoidNamespaceSerializer_INSTANCE = JVoidNamespaceSerializer.INSTANCE
class KeyedStateBackend(object):
def __init__(self,
function_context,
keyed_state_backend,
window_serializer=JVoidNamespaceSerializer_INSTANCE,
window_converter=None):
self._function_context = function_context
self._keyed_state_backend = keyed_state_backend
self._window_serializer = window_serializer
self._window_converter = window_converter
def get_current_key(self):
return self._function_context.get_current_key()
def get_value_state(self, state_descriptor: ValueStateDescriptor) -> ValueStateImpl:
return ValueStateImpl(
self._get_or_create_keyed_state(state_descriptor),
from_type_info(state_descriptor.type_info),
self._window_converter)
def get_list_state(self, state_descriptor: ListStateDescriptor) -> ListStateImpl:
return ListStateImpl(
self._get_or_create_keyed_state(state_descriptor),
from_type_info(state_descriptor.type_info),
self._window_converter)
def get_map_state(self, state_descriptor: MapStateDescriptor) -> MapStateImpl:
return MapStateImpl(
self._get_or_create_keyed_state(state_descriptor),
from_type_info(state_descriptor.type_info),
self._window_converter)
def get_reducing_state(self, state_descriptor: ReducingStateDescriptor):
return ReducingStateImpl(
self._get_or_create_keyed_state(state_descriptor),
from_type_info(state_descriptor.type_info),
state_descriptor.get_reduce_function(),
self._window_converter)
def get_aggregating_state(self, state_descriptor: AggregatingStateDescriptor):
return AggregatingStateImpl(
self._get_or_create_keyed_state(state_descriptor),
from_type_info(state_descriptor.type_info),
state_descriptor.get_agg_function(),
self._window_converter)
def _get_or_create_keyed_state(self, state_descriptor: StateDescriptor):
return self._keyed_state_backend.getPartitionedState(
JVoidNamespace_INSTANCE,
self._window_serializer,
to_java_state_descriptor(state_descriptor))
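# Illustrative sketch only (not part of the original module): `_example_increment_counter`
# is a hypothetical helper showing how a Python state descriptor is resolved through this
# backend; it assumes a KeyedStateBackend instance created by the embedded runtime.
def _example_increment_counter(backend: 'KeyedStateBackend'):
    from pyflink.common.typeinfo import Types
    # resolve (or create) the keyed state named "counter" and bump it by one
    counter = backend.get_value_state(ValueStateDescriptor("counter", Types.LONG()))
    current = counter.value() or 0
    counter.update(current + 1)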
| 4,222 | 46.988636 | 100 |
py
|
flink
|
flink-master/flink-python/pyflink/fn_execution/embedded/java_utils.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pemja import findClass
from pyflink.common.typeinfo import (TypeInformation, Types, BasicTypeInfo, BasicType,
PrimitiveArrayTypeInfo, BasicArrayTypeInfo,
ObjectArrayTypeInfo, MapTypeInfo)
from pyflink.datastream.state import (StateDescriptor, ValueStateDescriptor,
ReducingStateDescriptor,
AggregatingStateDescriptor, ListStateDescriptor,
MapStateDescriptor, StateTtlConfig)
# Java Types Class
JTypes = findClass('org.apache.flink.api.common.typeinfo.Types')
JPrimitiveArrayTypeInfo = findClass('org.apache.flink.api.common.typeinfo.PrimitiveArrayTypeInfo')
JBasicArrayTypeInfo = findClass('org.apache.flink.api.common.typeinfo.BasicArrayTypeInfo')
JPickledByteArrayTypeInfo = findClass('org.apache.flink.streaming.api.typeinfo.python.'
'PickledByteArrayTypeInfo')
JMapTypeInfo = findClass('org.apache.flink.api.java.typeutils.MapTypeInfo')
# Java State Descriptor Class
JValueStateDescriptor = findClass('org.apache.flink.api.common.state.ValueStateDescriptor')
JListStateDescriptor = findClass('org.apache.flink.api.common.state.ListStateDescriptor')
JMapStateDescriptor = findClass('org.apache.flink.api.common.state.MapStateDescriptor')
# Java StateTtlConfig
JStateTtlConfig = findClass('org.apache.flink.api.common.state.StateTtlConfig')
JTime = findClass('org.apache.flink.api.common.time.Time')
JUpdateType = findClass('org.apache.flink.api.common.state.StateTtlConfig$UpdateType')
JStateVisibility = findClass('org.apache.flink.api.common.state.StateTtlConfig$StateVisibility')
def to_java_typeinfo(type_info: TypeInformation):
if isinstance(type_info, BasicTypeInfo):
basic_type = type_info._basic_type
if basic_type == BasicType.STRING:
j_typeinfo = JTypes.STRING
elif basic_type == BasicType.BYTE:
j_typeinfo = JTypes.LONG
elif basic_type == BasicType.BOOLEAN:
j_typeinfo = JTypes.BOOLEAN
elif basic_type == BasicType.SHORT:
j_typeinfo = JTypes.LONG
elif basic_type == BasicType.INT:
j_typeinfo = JTypes.LONG
elif basic_type == BasicType.LONG:
j_typeinfo = JTypes.LONG
elif basic_type == BasicType.FLOAT:
j_typeinfo = JTypes.DOUBLE
elif basic_type == BasicType.DOUBLE:
j_typeinfo = JTypes.DOUBLE
elif basic_type == BasicType.CHAR:
j_typeinfo = JTypes.STRING
elif basic_type == BasicType.BIG_INT:
j_typeinfo = JTypes.BIG_INT
elif basic_type == BasicType.BIG_DEC:
j_typeinfo = JTypes.BIG_DEC
elif basic_type == BasicType.INSTANT:
j_typeinfo = JTypes.INSTANT
else:
raise TypeError("Invalid BasicType %s." % basic_type)
elif isinstance(type_info, PrimitiveArrayTypeInfo):
element_type = type_info._element_type
if element_type == Types.BOOLEAN():
j_typeinfo = JPrimitiveArrayTypeInfo.BOOLEAN_PRIMITIVE_ARRAY_TYPE_INFO
elif element_type == Types.BYTE():
j_typeinfo = JPrimitiveArrayTypeInfo.BYTE_PRIMITIVE_ARRAY_TYPE_INFO
elif element_type == Types.SHORT():
j_typeinfo = JPrimitiveArrayTypeInfo.SHORT_PRIMITIVE_ARRAY_TYPE_INFO
elif element_type == Types.INT():
j_typeinfo = JPrimitiveArrayTypeInfo.INT_PRIMITIVE_ARRAY_TYPE_INFO
elif element_type == Types.LONG():
j_typeinfo = JPrimitiveArrayTypeInfo.LONG_PRIMITIVE_ARRAY_TYPE_INFO
elif element_type == Types.FLOAT():
j_typeinfo = JPrimitiveArrayTypeInfo.FLOAT_PRIMITIVE_ARRAY_TYPE_INFO
elif element_type == Types.DOUBLE():
j_typeinfo = JPrimitiveArrayTypeInfo.DOUBLE_PRIMITIVE_ARRAY_TYPE_INFO
elif element_type == Types.CHAR():
j_typeinfo = JPrimitiveArrayTypeInfo.CHAR_PRIMITIVE_ARRAY_TYPE_INFO
else:
raise TypeError("Invalid element type for a primitive array.")
elif isinstance(type_info, BasicArrayTypeInfo):
element_type = type_info._element_type
if element_type == Types.BOOLEAN():
j_typeinfo = JBasicArrayTypeInfo.BOOLEAN_ARRAY_TYPE_INFO
elif element_type == Types.BYTE():
j_typeinfo = JBasicArrayTypeInfo.BYTE_ARRAY_TYPE_INFO
elif element_type == Types.SHORT():
j_typeinfo = JBasicArrayTypeInfo.SHORT_ARRAY_TYPE_INFO
elif element_type == Types.INT():
j_typeinfo = JBasicArrayTypeInfo.INT_ARRAY_TYPE_INFO
elif element_type == Types.LONG():
j_typeinfo = JBasicArrayTypeInfo.LONG_ARRAY_TYPE_INFO
elif element_type == Types.FLOAT():
j_typeinfo = JBasicArrayTypeInfo.FLOAT_ARRAY_TYPE_INFO
elif element_type == Types.DOUBLE():
j_typeinfo = JBasicArrayTypeInfo.DOUBLE_ARRAY_TYPE_INFO
elif element_type == Types.CHAR():
j_typeinfo = JBasicArrayTypeInfo.CHAR_ARRAY_TYPE_INFO
elif element_type == Types.STRING():
j_typeinfo = JBasicArrayTypeInfo.STRING_ARRAY_TYPE_INFO
else:
raise TypeError("Invalid element type for a basic array.")
elif isinstance(type_info, ObjectArrayTypeInfo):
element_type = type_info._element_type
j_typeinfo = JTypes.OBJECT_ARRAY(to_java_typeinfo(element_type))
elif isinstance(type_info, MapTypeInfo):
j_key_typeinfo = to_java_typeinfo(type_info._key_type_info)
j_value_typeinfo = to_java_typeinfo(type_info._value_type_info)
j_typeinfo = JMapTypeInfo(j_key_typeinfo, j_value_typeinfo)
else:
j_typeinfo = JPickledByteArrayTypeInfo.PICKLED_BYTE_ARRAY_TYPE_INFO
return j_typeinfo
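# Hedged example (not in the original module): a small sketch of the mapping performed by
# to_java_typeinfo. Note that narrow numeric types are widened on the Java side, e.g. both
# Types.INT() and Types.SHORT() map to the Java LONG type information.
def _example_typeinfo_mapping():
    j_long_info = to_java_typeinfo(Types.INT())                            # widened to JTypes.LONG
    j_map_info = to_java_typeinfo(Types.MAP(Types.STRING(), Types.DOUBLE()))
    return j_long_info, j_map_info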
def to_java_state_ttl_config(ttl_config: StateTtlConfig):
j_ttl_config_builder = JStateTtlConfig.newBuilder(
JTime.milliseconds(ttl_config.get_ttl().to_milliseconds()))
update_type = ttl_config.get_update_type()
if update_type == StateTtlConfig.UpdateType.Disabled:
j_ttl_config_builder.setUpdateType(JUpdateType.Disabled)
elif update_type == StateTtlConfig.UpdateType.OnCreateAndWrite:
j_ttl_config_builder.setUpdateType(JUpdateType.OnCreateAndWrite)
elif update_type == StateTtlConfig.UpdateType.OnReadAndWrite:
j_ttl_config_builder.setUpdateType(JUpdateType.OnReadAndWrite)
state_visibility = ttl_config.get_state_visibility()
if state_visibility == StateTtlConfig.StateVisibility.ReturnExpiredIfNotCleanedUp:
j_ttl_config_builder.setStateVisibility(JStateVisibility.ReturnExpiredIfNotCleanedUp)
elif state_visibility == StateTtlConfig.StateVisibility.NeverReturnExpired:
j_ttl_config_builder.setStateVisibility(JStateVisibility.NeverReturnExpired)
cleanup_strategies = ttl_config.get_cleanup_strategies()
if not cleanup_strategies.is_cleanup_in_background():
j_ttl_config_builder.disableCleanupInBackground()
if cleanup_strategies.in_full_snapshot():
j_ttl_config_builder.cleanupFullSnapshot()
incremental_cleanup_strategy = cleanup_strategies.get_incremental_cleanup_strategy()
if incremental_cleanup_strategy:
j_ttl_config_builder.cleanupIncrementally(
incremental_cleanup_strategy.get_cleanup_size(),
incremental_cleanup_strategy.run_cleanup_for_every_record())
rocksdb_compact_filter_cleanup_strategy = \
cleanup_strategies.get_rocksdb_compact_filter_cleanup_strategy()
if rocksdb_compact_filter_cleanup_strategy:
j_ttl_config_builder.cleanupInRocksdbCompactFilter(
rocksdb_compact_filter_cleanup_strategy.get_query_time_after_num_entries())
return j_ttl_config_builder.build()
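# Hedged example (not in the original module): builds a Python StateTtlConfig and converts
# it to its Java counterpart; like the rest of this module it assumes a JVM is reachable
# through pemja.
def _example_ttl_conversion():
    from pyflink.common.time import Time
    ttl_config = StateTtlConfig \
        .new_builder(Time.milliseconds(1000)) \
        .set_update_type(StateTtlConfig.UpdateType.OnCreateAndWrite) \
        .cleanup_incrementally(10, False) \
        .build()
    return to_java_state_ttl_config(ttl_config)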
def to_java_state_descriptor(state_descriptor: StateDescriptor):
if isinstance(state_descriptor,
(ValueStateDescriptor, ReducingStateDescriptor, AggregatingStateDescriptor)):
value_type_info = to_java_typeinfo(state_descriptor.type_info)
j_state_descriptor = JValueStateDescriptor(state_descriptor.name, value_type_info)
elif isinstance(state_descriptor, ListStateDescriptor):
element_type_info = to_java_typeinfo(state_descriptor.type_info.elem_type)
j_state_descriptor = JListStateDescriptor(state_descriptor.name, element_type_info)
elif isinstance(state_descriptor, MapStateDescriptor):
key_type_info = to_java_typeinfo(state_descriptor.type_info._key_type_info)
value_type_info = to_java_typeinfo(state_descriptor.type_info._value_type_info)
j_state_descriptor = JMapStateDescriptor(
state_descriptor.name, key_type_info, value_type_info)
else:
raise Exception("Unknown supported state_descriptor {0}".format(state_descriptor))
if state_descriptor._ttl_config:
j_state_ttl_config = to_java_state_ttl_config(state_descriptor._ttl_config)
j_state_descriptor.enableTimeToLive(j_state_ttl_config)
return j_state_descriptor
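# Illustrative sketch only (not part of the original module): the descriptor name
# "example-map" is hypothetical. It converts a Python MapStateDescriptor with TTL enabled
# into the corresponding Java descriptor.
def _example_state_descriptor_conversion():
    from pyflink.common.time import Time
    descriptor = MapStateDescriptor("example-map", Types.STRING(), Types.LONG())
    descriptor.enable_time_to_live(StateTtlConfig.new_builder(Time.seconds(60)).build())
    return to_java_state_descriptor(descriptor)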
| 10,051 | 48.034146 | 98 |
py
|
flink
|
flink-master/flink-python/pyflink/fn_execution/embedded/operation_utils.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pyflink.fn_execution.embedded.operations import (OneInputFunctionOperation,
TwoInputFunctionOperation)
from pyflink.fn_execution.embedded.converters import from_type_info_proto, from_schema_proto
def parse_user_defined_data_stream_function_proto(proto):
from pyflink.fn_execution import flink_fn_execution_pb2
serialized_fn = flink_fn_execution_pb2.UserDefinedDataStreamFunction()
serialized_fn.ParseFromString(proto)
return serialized_fn
def parse_coder_proto(proto):
from pyflink.fn_execution import flink_fn_execution_pb2
coder = flink_fn_execution_pb2.CoderInfoDescriptor()
coder.ParseFromString(proto)
return coder
def parse_function_proto(proto):
from pyflink.fn_execution import flink_fn_execution_pb2
serialized_fn = flink_fn_execution_pb2.UserDefinedFunctions()
serialized_fn.ParseFromString(proto)
return serialized_fn
def create_scalar_operation_from_proto(proto,
input_coder_info,
output_coder_into,
one_arg_optimization=False,
one_result_optimization=False):
from pyflink.fn_execution.table.operations import ScalarFunctionOperation
serialized_fn = parse_function_proto(proto)
input_data_converter = (
from_schema_proto(
parse_coder_proto(input_coder_info).flatten_row_type.schema,
one_arg_optimization))
output_data_converter = (
from_schema_proto(
parse_coder_proto(output_coder_into).flatten_row_type.schema,
one_result_optimization))
scalar_operation = ScalarFunctionOperation(
serialized_fn, one_arg_optimization, one_result_optimization)
process_element_func = scalar_operation.process_element
def process_element(value):
actual_value = input_data_converter.to_internal(value)
result = process_element_func(actual_value)
return output_data_converter.to_external(result)
scalar_operation.process_element = process_element
return scalar_operation
def create_table_operation_from_proto(proto, input_coder_info, output_coder_into):
from pyflink.fn_execution.table.operations import TableFunctionOperation
serialized_fn = parse_function_proto(proto)
input_data_converter = (
from_schema_proto(parse_coder_proto(input_coder_info).flatten_row_type.schema))
output_data_converter = (
from_schema_proto(parse_coder_proto(output_coder_into).flatten_row_type.schema))
table_operation = TableFunctionOperation(serialized_fn)
process_element_func = table_operation.process_element
def process_element(value):
actual_value = input_data_converter.to_internal(value)
results = process_element_func(actual_value)
for result in results:
yield output_data_converter.to_external(result)
table_operation.process_element = process_element
return table_operation
def create_one_input_user_defined_data_stream_function_from_protos(
function_infos, input_coder_info, output_coder_info, runtime_context,
function_context, timer_context, side_output_context, job_parameters, keyed_state_backend,
operator_state_backend):
    serialized_fns = [parse_user_defined_data_stream_function_proto(proto)
for proto in function_infos]
input_data_converter = (
from_type_info_proto(parse_coder_proto(input_coder_info).raw_type.type_info))
output_data_converter = (
from_type_info_proto(parse_coder_proto(output_coder_info).raw_type.type_info))
function_operation = OneInputFunctionOperation(
serialized_fns,
input_data_converter,
output_data_converter,
runtime_context,
function_context,
timer_context,
side_output_context,
job_parameters,
keyed_state_backend,
operator_state_backend)
return function_operation
def create_two_input_user_defined_data_stream_function_from_protos(
function_infos, input_coder_info1, input_coder_info2, output_coder_info, runtime_context,
function_context, timer_context, side_output_context, job_parameters, keyed_state_backend,
operator_state_backend):
    serialized_fns = [parse_user_defined_data_stream_function_proto(proto)
for proto in function_infos]
input_data_converter1 = (
from_type_info_proto(parse_coder_proto(input_coder_info1).raw_type.type_info))
input_data_converter2 = (
from_type_info_proto(parse_coder_proto(input_coder_info2).raw_type.type_info))
output_data_converter = (
from_type_info_proto(parse_coder_proto(output_coder_info).raw_type.type_info))
function_operation = TwoInputFunctionOperation(
serialized_fns,
input_data_converter1,
input_data_converter2,
output_data_converter,
runtime_context,
function_context,
timer_context,
side_output_context,
job_parameters,
keyed_state_backend,
operator_state_backend)
return function_operation
| 6,187 | 37.675 | 98 |
py
|
flink
|
flink-master/flink-python/pyflink/fn_execution/embedded/__init__.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
| 958 | 52.277778 | 80 |
py
|
flink
|
flink-master/flink-python/pyflink/fn_execution/embedded/operations.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from pyflink.fn_execution.datastream.embedded.operations import extract_process_function
from pyflink.fn_execution.embedded.converters import DataConverter
class FunctionOperation(object):
def __init__(self,
operations,
output_data_converter: DataConverter):
self._operations = operations
self._output_data_converter = output_data_converter
self._main_operation = operations[0]
self._chained_operations = operations[1:]
def open(self):
for operation in self._operations:
operation.open()
def close(self):
for operation in self._operations:
operation.close()
def on_timer(self, timestamp):
results = self._main_operation.on_timer(timestamp)
if results:
results = self._process_elements(results)
yield from self._output_elements(results)
def _process_elements(self, elements):
def _process_elements_on_operation(op, items):
for item in items:
yield from op.process_element(item)
for operation in self._chained_operations:
elements = _process_elements_on_operation(operation, elements)
return elements
def _output_elements(self, elements):
for item in elements:
yield self._output_data_converter.to_external(item)
class OneInputFunctionOperation(FunctionOperation):
def __init__(self,
serialized_fns,
input_data_converter: DataConverter,
output_data_converter: DataConverter,
runtime_context,
function_context,
timer_context,
side_output_context,
job_parameters,
keyed_state_backend,
operator_state_backend):
operations = (
[extract_process_function(
serialized_fn,
runtime_context,
function_context,
timer_context,
side_output_context,
job_parameters,
keyed_state_backend,
operator_state_backend)
for serialized_fn in serialized_fns])
super(OneInputFunctionOperation, self).__init__(operations, output_data_converter)
self._input_data_converter = input_data_converter
def process_element(self, value):
results = self._main_operation.process_element(
self._input_data_converter.to_internal(value))
results = self._process_elements(results)
yield from self._output_elements(results)
class TwoInputFunctionOperation(FunctionOperation):
def __init__(self,
serialized_fns,
input_data_converter1: DataConverter,
input_data_converter2: DataConverter,
output_data_converter: DataConverter,
runtime_context,
function_context,
timer_context,
side_output_context,
job_parameters,
keyed_state_backend,
operator_state_backend):
operations = (
[extract_process_function(
serialized_fn,
runtime_context,
function_context,
timer_context,
side_output_context,
job_parameters,
keyed_state_backend,
operator_state_backend)
for serialized_fn in serialized_fns])
super(TwoInputFunctionOperation, self).__init__(operations, output_data_converter)
self._input_data_converter1 = input_data_converter1
self._input_data_converter2 = input_data_converter2
self._main_operation = operations[0]
self._other_operations = operations[1:]
def process_element1(self, value):
results = self._main_operation.process_element1(
self._input_data_converter1.to_internal(value))
results = self._process_elements(results)
yield from self._output_elements(results)
def process_element2(self, value):
results = self._main_operation.process_element2(
self._input_data_converter2.to_internal(value))
results = self._process_elements(results)
yield from self._output_elements(results)
| 5,340 | 36.879433 | 90 |
py
|
flink
|
flink-master/flink-python/pyflink/fn_execution/embedded/converters.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import pickle
from abc import ABC, abstractmethod
from typing import TypeVar, List, Tuple
from pemja import findClass
from pyflink.common import Row, RowKind, TypeInformation
from pyflink.common.typeinfo import (PickledBytesTypeInfo, PrimitiveArrayTypeInfo,
BasicArrayTypeInfo, ObjectArrayTypeInfo, RowTypeInfo,
TupleTypeInfo, MapTypeInfo, ListTypeInfo)
from pyflink.datastream import TimeWindow, CountWindow, GlobalWindow
IN = TypeVar('IN')
OUT = TypeVar('OUT')
# Java Window
JTimeWindow = findClass('org.apache.flink.table.runtime.operators.window.TimeWindow')
JCountWindow = findClass('org.apache.flink.table.runtime.operators.window.CountWindow')
JGlobalWindow = findClass('org.apache.flink.streaming.api.windowing.windows.GlobalWindow')
class DataConverter(ABC):
@abstractmethod
def to_internal(self, value) -> IN:
pass
@abstractmethod
def to_external(self, value) -> OUT:
pass
def __eq__(self, other):
return type(self) == type(other)
class IdentityDataConverter(DataConverter):
def to_internal(self, value) -> IN:
return value
def to_external(self, value) -> OUT:
return value
class PickleDataConverter(DataConverter):
def to_internal(self, value) -> IN:
if value is None:
return None
return pickle.loads(value)
def to_external(self, value) -> OUT:
if value is None:
return None
return pickle.dumps(value)
class FlattenRowDataConverter(DataConverter):
def __init__(self, field_data_converters: List[DataConverter]):
self._field_data_converters = field_data_converters
def to_internal(self, value) -> IN:
if value is None:
return None
return tuple([self._field_data_converters[i].to_internal(item)
for i, item in enumerate(value)])
def to_external(self, value) -> OUT:
if value is None:
return None
return tuple([self._field_data_converters[i].to_external(item)
for i, item in enumerate(value)])
class RowDataConverter(DataConverter):
def __init__(self, field_data_converters: List[DataConverter], field_names: List[str]):
self._field_data_converters = field_data_converters
self._field_names = field_names
def to_internal(self, value) -> IN:
if value is None:
return None
row = Row()
row._values = [self._field_data_converters[i].to_internal(item)
for i, item in enumerate(value[1])]
row.set_field_names(self._field_names)
row.set_row_kind(RowKind(value[0]))
return row
def to_external(self, value: Row) -> OUT:
if value is None:
return None
values = value._values
fields = tuple([self._field_data_converters[i].to_external(values[i])
for i in range(len(values))])
return value.get_row_kind().value, fields
class TupleDataConverter(DataConverter):
def __init__(self, field_data_converters: List[DataConverter]):
self._field_data_converters = field_data_converters
def to_internal(self, value) -> IN:
if value is None:
return None
return tuple([self._field_data_converters[i].to_internal(item)
for i, item in enumerate(value)])
def to_external(self, value: Tuple) -> OUT:
if value is None:
return None
return tuple([self._field_data_converters[i].to_external(item)
for i, item in enumerate(value)])
class ListDataConverter(DataConverter):
def __init__(self, field_converter: DataConverter):
self._field_converter = field_converter
def to_internal(self, value) -> IN:
if value is None:
return None
return [self._field_converter.to_internal(item) for item in value]
def to_external(self, value) -> OUT:
if value is None:
return None
return [self._field_converter.to_external(item) for item in value]
class ArrayDataConverter(ListDataConverter):
def __init__(self, field_converter: DataConverter):
super(ArrayDataConverter, self).__init__(field_converter)
def to_internal(self, value) -> IN:
return tuple(super(ArrayDataConverter, self).to_internal(value))
def to_external(self, value) -> OUT:
return tuple(super(ArrayDataConverter, self).to_external(value))
class DictDataConverter(DataConverter):
def __init__(self, key_converter: DataConverter, value_converter: DataConverter):
self._key_converter = key_converter
self._value_converter = value_converter
def to_internal(self, value) -> IN:
if value is None:
return None
return {self._key_converter.to_internal(k): self._value_converter.to_internal(v)
for k, v in value.items()}
def to_external(self, value) -> OUT:
if value is None:
return None
return {self._key_converter.to_external(k): self._value_converter.to_external(v)
for k, v in value.items()}
class TimeWindowConverter(DataConverter):
def to_internal(self, value) -> TimeWindow:
return TimeWindow(value.getStart(), value.getEnd())
def to_external(self, value: TimeWindow) -> OUT:
return JTimeWindow(value.start, value.end)
class CountWindowConverter(DataConverter):
def to_internal(self, value) -> CountWindow:
return CountWindow(value.getId())
def to_external(self, value: CountWindow) -> OUT:
return JCountWindow(value.id)
class GlobalWindowConverter(DataConverter):
def to_internal(self, value) -> IN:
return GlobalWindow()
def to_external(self, value) -> OUT:
return JGlobalWindow.get()
def from_type_info_proto(type_info):
# for data stream type information.
from pyflink.fn_execution import flink_fn_execution_pb2
type_info_name = flink_fn_execution_pb2.TypeInfo
type_name = type_info.type_name
if type_name == type_info_name.PICKLED_BYTES:
return PickleDataConverter()
elif type_name == type_info_name.ROW:
return RowDataConverter(
[from_type_info_proto(f.field_type) for f in type_info.row_type_info.fields],
[f.field_name for f in type_info.row_type_info.fields])
elif type_name == type_info_name.TUPLE:
return TupleDataConverter(
[from_type_info_proto(field_type)
for field_type in type_info.tuple_type_info.field_types])
elif type_name in (type_info_name.BASIC_ARRAY,
type_info_name.OBJECT_ARRAY):
return ArrayDataConverter(from_type_info_proto(type_info.collection_element_type))
    elif type_name == type_info_name.LIST:
return ListDataConverter(from_type_info_proto(type_info.collection_element_type))
elif type_name == type_info_name.MAP:
return DictDataConverter(from_type_info_proto(type_info.map_type_info.key_type),
from_type_info_proto(type_info.map_type_info.value_type))
return IdentityDataConverter()
def from_schema_proto(schema, one_arg_optimized=False):
field_converters = [from_field_type_proto(f.type) for f in schema.fields]
if one_arg_optimized and len(field_converters) == 1:
return field_converters[0]
else:
return FlattenRowDataConverter(field_converters)
def from_field_type_proto(field_type):
from pyflink.fn_execution import flink_fn_execution_pb2
schema_type_name = flink_fn_execution_pb2.Schema
type_name = field_type.type_name
if type_name == schema_type_name.ROW:
return RowDataConverter(
[from_field_type_proto(f.type) for f in field_type.row_schema.fields],
[f.name for f in field_type.row_schema.fields])
elif type_name == schema_type_name.BASIC_ARRAY:
return ArrayDataConverter(from_field_type_proto(field_type.collection_element_type))
elif type_name == schema_type_name.MAP:
return DictDataConverter(from_field_type_proto(field_type.map_info.key_type),
from_field_type_proto(field_type.map_info.value_type))
return IdentityDataConverter()
def from_type_info(type_info: TypeInformation):
if isinstance(type_info, (PickledBytesTypeInfo, RowTypeInfo, TupleTypeInfo)):
return PickleDataConverter()
elif isinstance(type_info, (PrimitiveArrayTypeInfo, BasicArrayTypeInfo, ObjectArrayTypeInfo)):
return ArrayDataConverter(from_type_info(type_info._element_type))
elif isinstance(type_info, ListTypeInfo):
return ListDataConverter(from_type_info(type_info.elem_type))
elif isinstance(type_info, MapTypeInfo):
return DictDataConverter(from_type_info(type_info._key_type_info),
from_type_info(type_info._value_type_info))
return IdentityDataConverter()
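# Illustrative sketch only (not part of the original module): picks a converter from a
# composite type information and round-trips a value through it.
def _example_round_trip():
    from pyflink.common.typeinfo import Types
    converter = from_type_info(Types.MAP(Types.STRING(), Types.LONG()))
    internal = converter.to_internal({'flink': 1})
    return converter.to_external(internal)  # -> {'flink': 1}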
| 9,985 | 34.038596 | 98 |
py
|
flink
|
flink-master/flink-python/pyflink/fn_execution/utils/operation_utils.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import datetime
import threading
import time
from collections.abc import Generator
from functools import partial
from typing import Any, Tuple, Dict, List
from pyflink.common import Row
from pyflink.fn_execution import pickle
from pyflink.serializers import PickleSerializer
from pyflink.table import functions
from pyflink.table.udf import DelegationTableFunction, DelegatingScalarFunction, \
ImperativeAggregateFunction, PandasAggregateFunctionWrapper
_func_num = 0
_constant_num = 0
def normalize_table_function_result(it):
def normalize_one_row(value):
if isinstance(value, tuple):
# We assume that tuple is a single line output
return [*value]
elif isinstance(value, Row):
            # We assume that the Row is a single line output
return value._values
else:
# single field value
return [value]
if it is None:
def func():
for i in []:
yield i
return func()
if isinstance(it, (list, range, Generator)):
def func():
for item in it:
yield normalize_one_row(item)
return func()
else:
def func():
yield normalize_one_row(it)
return func()
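# Hedged illustration (not part of the original module): shows how heterogeneous user
# results are normalized into a generator of per-row lists.
def _example_normalize_table_function_result():
    rows = list(normalize_table_function_result([(1, 'a'), Row(2, 'b'), 3]))
    return rows  # -> [[1, 'a'], [2, 'b'], [3]]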
def normalize_pandas_result(it):
import pandas as pd
arrays = []
for result in it:
if isinstance(result, (Row, Tuple)):
arrays.append(pd.concat([pd.Series([item]) for item in result], axis=1))
else:
arrays.append(pd.Series([result]))
return arrays
def wrap_input_series_as_dataframe(*args):
import pandas as pd
return pd.concat(args, axis=1)
def check_pandas_udf_result(f, *input_args):
output = f(*input_args)
import pandas as pd
assert type(output) == pd.Series or type(output) == pd.DataFrame, \
"The result type of Pandas UDF '%s' must be pandas.Series or pandas.DataFrame, got %s" \
% (f.__name__, type(output))
assert len(output) == len(input_args[0]), \
"The result length '%d' of Pandas UDF '%s' is not equal to the input length '%d'" \
% (len(output), f.__name__, len(input_args[0]))
return output
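# Hedged example (assumes pandas is installed, as the Pandas UDF code path requires):
# a vectorized function returning a Series of matching length passes the check.
def _example_check_pandas_udf_result():
    import pandas as pd
    def double(col):
        return col * 2
    return check_pandas_udf_result(double, pd.Series([1, 2, 3]))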
def extract_over_window_user_defined_function(user_defined_function_proto):
window_index = user_defined_function_proto.window_index
return (*extract_user_defined_function(user_defined_function_proto, True), window_index)
def extract_user_defined_function(user_defined_function_proto, pandas_udaf=False,
one_arg_optimization=False)\
-> Tuple[str, Dict, List]:
"""
Extracts user-defined-function from the proto representation of a
:class:`UserDefinedFunction`.
    :param user_defined_function_proto: the proto representation of the Python
        :class:`UserDefinedFunction`
    :param pandas_udaf: whether the user_defined_function_proto is a pandas UDAF
    :param one_arg_optimization: whether the one-argument optimization is enabled
"""
def _next_func_num():
global _func_num
_func_num = _func_num + 1
return _func_num
def _extract_input(args) -> Tuple[str, Dict, List]:
local_variable_dict = {}
local_funcs = []
args_str = []
for arg in args:
if arg.HasField("udf"):
# for chaining Python UDF input: the input argument is a Python ScalarFunction
udf_arg, udf_variable_dict, udf_funcs = extract_user_defined_function(
arg.udf, one_arg_optimization=one_arg_optimization)
args_str.append(udf_arg)
local_variable_dict.update(udf_variable_dict)
local_funcs.extend(udf_funcs)
elif arg.HasField("inputOffset"):
if one_arg_optimization:
args_str.append("value")
else:
# the input argument is a column of the input row
args_str.append("value[%s]" % arg.inputOffset)
else:
# the input argument is a constant value
constant_value_name, parsed_constant_value = \
_parse_constant_value(arg.inputConstant)
args_str.append(constant_value_name)
local_variable_dict[constant_value_name] = parsed_constant_value
return ",".join(args_str), local_variable_dict, local_funcs
variable_dict = {}
user_defined_funcs = []
user_defined_func = pickle.loads(user_defined_function_proto.payload)
if pandas_udaf:
user_defined_func = PandasAggregateFunctionWrapper(user_defined_func)
func_name = 'f%s' % _next_func_num()
if isinstance(user_defined_func, DelegatingScalarFunction) \
or isinstance(user_defined_func, DelegationTableFunction):
if user_defined_function_proto.is_pandas_udf:
variable_dict[func_name] = partial(check_pandas_udf_result, user_defined_func.func)
else:
variable_dict[func_name] = user_defined_func.func
else:
variable_dict[func_name] = user_defined_func.eval
user_defined_funcs.append(user_defined_func)
func_args, input_variable_dict, input_funcs = _extract_input(user_defined_function_proto.inputs)
variable_dict.update(input_variable_dict)
user_defined_funcs.extend(input_funcs)
if user_defined_function_proto.takes_row_as_input:
if input_variable_dict:
# for constant or other udfs as input arguments.
func_str = "%s(%s)" % (func_name, func_args)
elif user_defined_function_proto.is_pandas_udf or pandas_udaf:
# for pandas udf/udaf, the input data structure is a List of Pandas.Series
# we need to merge these Pandas.Series into a Pandas.DataFrame
variable_dict['wrap_input_series_as_dataframe'] = wrap_input_series_as_dataframe
func_str = "%s(wrap_input_series_as_dataframe(%s))" % (func_name, func_args)
else:
# directly use `value` as input argument
# e.g.
# lambda value: Row(value[0], value[1])
# can be optimized to
# lambda value: value
func_str = "%s(value)" % func_name
else:
func_str = "%s(%s)" % (func_name, func_args)
return func_str, variable_dict, user_defined_funcs
def _parse_constant_value(constant_value) -> Tuple[str, Any]:
j_type = constant_value[0]
serializer = PickleSerializer()
pickled_data = serializer.loads(constant_value[1:])
# the type set contains
# TINYINT,SMALLINT,INTEGER,BIGINT,FLOAT,DOUBLE,DECIMAL,CHAR,VARCHAR,NULL,BOOLEAN
    # the pickled_data doesn't need to be converted to another Python object
if j_type == 0:
parsed_constant_value = pickled_data
# the type is DATE
elif j_type == 1:
parsed_constant_value = \
datetime.date(year=1970, month=1, day=1) + datetime.timedelta(days=pickled_data)
# the type is TIME
elif j_type == 2:
seconds, milliseconds = divmod(pickled_data, 1000)
minutes, seconds = divmod(seconds, 60)
hours, minutes = divmod(minutes, 60)
parsed_constant_value = datetime.time(hours, minutes, seconds, milliseconds * 1000)
# the type is TIMESTAMP
elif j_type == 3:
parsed_constant_value = \
datetime.datetime(year=1970, month=1, day=1, hour=0, minute=0, second=0) \
+ datetime.timedelta(milliseconds=pickled_data)
else:
raise Exception("Unknown type %s, should never happen" % str(j_type))
def _next_constant_num():
global _constant_num
_constant_num = _constant_num + 1
return _constant_num
constant_value_name = 'c%s' % _next_constant_num()
return constant_value_name, parsed_constant_value
def extract_user_defined_aggregate_function(
current_index,
user_defined_function_proto,
distinct_info_dict: Dict[Tuple[List[str]], Tuple[List[int], List[int]]]):
user_defined_agg = load_aggregate_function(user_defined_function_proto.payload)
assert isinstance(user_defined_agg, ImperativeAggregateFunction)
args_str = []
local_variable_dict = {}
for arg in user_defined_function_proto.inputs:
if arg.HasField("inputOffset"):
# the input argument is a column of the input row
args_str.append("value[%s]" % arg.inputOffset)
else:
# the input argument is a constant value
constant_value_name, parsed_constant_value = \
_parse_constant_value(arg.inputConstant)
for key, value in local_variable_dict.items():
if value == parsed_constant_value:
constant_value_name = key
break
if constant_value_name not in local_variable_dict:
local_variable_dict[constant_value_name] = parsed_constant_value
args_str.append(constant_value_name)
if user_defined_function_proto.distinct:
if tuple(args_str) in distinct_info_dict:
distinct_info_dict[tuple(args_str)][0].append(current_index)
distinct_info_dict[tuple(args_str)][1].append(user_defined_function_proto.filter_arg)
distinct_index = distinct_info_dict[tuple(args_str)][0][0]
else:
distinct_info_dict[tuple(args_str)] = \
([current_index], [user_defined_function_proto.filter_arg])
distinct_index = current_index
else:
distinct_index = -1
if user_defined_function_proto.takes_row_as_input and not local_variable_dict:
# directly use `value` as input argument
# e.g.
# lambda value: Row(value[0], value[1])
# can be optimized to
# lambda value: value
func_str = "lambda value : [value]"
else:
func_str = "lambda value : (%s,)" % ",".join(args_str)
return user_defined_agg, \
eval(func_str, local_variable_dict) \
if args_str else lambda v: tuple(), \
user_defined_function_proto.filter_arg, \
distinct_index
def is_built_in_function(payload):
    # The payload is either pickled bytes or the class name of a built-in function.
    # If it represents a built-in function, it starts with 0x00.
    # If it is pickled bytes, it starts with 0x80.
return payload[0] == 0
def load_aggregate_function(payload):
if is_built_in_function(payload):
built_in_function_class_name = payload[1:].decode("utf-8")
cls = getattr(functions, built_in_function_class_name)
return cls()
else:
return pickle.loads(payload)
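# Illustrative sketch of the payload convention described above (not part of the original
# module); the byte values below are fabricated for demonstration, real payloads are
# produced on the Java side.
def _example_payload_dispatch():
    built_in_payload = b'\x00' + b'SomeBuiltInAggFunction'  # hypothetical class name
    pickled_like_payload = b'\x80fake-pickle-bytes'         # pickle streams start with 0x80
    return is_built_in_function(built_in_payload), is_built_in_function(pickled_like_payload)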
class PeriodicThread(threading.Thread):
"""Call a function periodically with the specified number of seconds"""
def __init__(self,
interval,
function,
args=None,
kwargs=None
) -> None:
threading.Thread.__init__(self)
self._interval = interval
self._function = function
self._args = args if args is not None else []
self._kwargs = kwargs if kwargs is not None else {}
self._finished = threading.Event()
def run(self) -> None:
now = time.time()
next_call = now + self._interval
while (next_call <= now and not self._finished.is_set()) or \
(next_call > now and not self._finished.wait(next_call - now)):
if next_call <= now:
next_call = now + self._interval
else:
next_call = next_call + self._interval
self._function(*self._args, **self._kwargs)
now = time.time()
def cancel(self) -> None:
"""Stop the thread if it hasn't finished yet."""
self._finished.set()
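# Hedged usage sketch (not part of the original module): collect a heartbeat roughly every
# 0.5 seconds until the thread is cancelled; exact counts depend on scheduling.
def _example_periodic_heartbeat():
    beats = []
    heartbeat = PeriodicThread(0.5, beats.append, args=['ping'])
    heartbeat.daemon = True
    heartbeat.start()
    time.sleep(1.6)
    heartbeat.cancel()
    heartbeat.join()
    return beats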
| 12,727 | 38.775 | 100 |
py
|
flink
|
flink-master/flink-python/pyflink/fn_execution/utils/__init__.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
| 958 | 52.277778 | 80 |
py
|
flink
|
flink-master/flink-python/pyflink/fn_execution/beam/beam_sdk_worker_main.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import sys
# force to register the operations to SDK Harness
import pyflink.fn_execution.beam.beam_operations # noqa # pylint: disable=unused-import
# force to register the coders to SDK Harness
import pyflink.fn_execution.beam.beam_coders # noqa # pylint: disable=unused-import
import apache_beam.runners.worker.sdk_worker_main
# disable bundle processor shutdown
from apache_beam.runners.worker import sdk_worker
sdk_worker.DEFAULT_BUNDLE_PROCESSOR_CACHE_SHUTDOWN_THRESHOLD_S = 86400 * 30
def print_to_logging(logging_func, msg, *args, **kwargs):
if msg != '\n':
logging_func(msg, *args, **kwargs)
class CustomPrint(object):
def __init__(self, _print):
self._msg_buffer = []
self._print = _print
def print(self, *args, sep=' ', end='\n', file=None):
self._msg_buffer.append(sep.join([str(arg) for arg in args]))
if end == '\n':
self._print(''.join(self._msg_buffer), sep=sep, end=end, file=file)
self._msg_buffer.clear()
else:
self._msg_buffer.append(end)
def close(self):
if self._msg_buffer:
self._print(''.join(self._msg_buffer), sep='', end='\n')
self._msg_buffer.clear()
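# Illustrative sketch only (not part of the original module): demonstrates the buffering
# behaviour of CustomPrint with a hypothetical sink that records flushed messages.
def _example_custom_print():
    collected = []
    def _sink(msg, sep=' ', end='\n', file=None):
        collected.append(msg)
    custom = CustomPrint(_sink)
    custom.print('hello', 'world')   # end == '\n', flushed immediately as "hello world"
    custom.print('partial', end='')  # buffered until close()
    custom.close()
    return collected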
def main():
import builtins
import logging
from functools import partial
# redirect stdout to logging.info, stderr to logging.error
_info = logging.getLogger().info
_error = logging.getLogger().error
sys.stdout.write = partial(print_to_logging, _info)
sys.stderr.write = partial(print_to_logging, _error)
custom_print = CustomPrint(print)
builtins.print = custom_print.print
# Remove all the built-in log handles
logging.getLogger().handlers = []
apache_beam.runners.worker.sdk_worker_main.main(sys.argv)
custom_print.close()
| 2,764 | 36.364865 | 87 |
py
|
flink
|
flink-master/flink-python/pyflink/fn_execution/beam/beam_coder_impl_slow.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from typing import Any
from apache_beam.coders.coder_impl import StreamCoderImpl, create_InputStream, create_OutputStream
from pyflink.fn_execution.stream_slow import OutputStream
from pyflink.fn_execution.beam.beam_stream_slow import BeamInputStream, BeamTimeBasedOutputStream
class PassThroughLengthPrefixCoderImpl(StreamCoderImpl):
def __init__(self, value_coder):
self._value_coder = value_coder
def encode_to_stream(self, value, out: create_OutputStream, nested: bool) -> Any:
self._value_coder.encode_to_stream(value, out, nested)
def decode_from_stream(self, in_stream: create_InputStream, nested: bool) -> Any:
return self._value_coder.decode_from_stream(in_stream, nested)
def get_estimated_size_and_observables(self, value: Any, nested=False):
return 0, []
def __repr__(self):
return 'PassThroughLengthPrefixCoderImpl[%s]' % self._value_coder
class FlinkFieldCoderBeamWrapper(StreamCoderImpl):
"""
Bridge between Beam coder and Flink coder for the low-level FieldCoder.
"""
def __init__(self, value_coder):
self._value_coder = value_coder
self._data_output_stream = OutputStream()
def encode_to_stream(self, value, out_stream: create_OutputStream, nested):
self._value_coder.encode_to_stream(value, self._data_output_stream)
out_stream.write(self._data_output_stream.get())
self._data_output_stream.clear()
def decode_from_stream(self, in_stream: create_InputStream, nested):
data_input_stream = BeamInputStream(in_stream)
return self._value_coder.decode_from_stream(data_input_stream)
def __repr__(self):
return 'FlinkFieldCoderBeamWrapper[%s]' % self._value_coder
class FlinkLengthPrefixCoderBeamWrapper(FlinkFieldCoderBeamWrapper):
"""
Bridge between Beam coder and Flink coder for the top-level LengthPrefixCoder.
"""
def __init__(self, value_coder):
super(FlinkLengthPrefixCoderBeamWrapper, self).__init__(value_coder)
self._output_stream = BeamTimeBasedOutputStream()
def encode_to_stream(self, value, out_stream: create_OutputStream, nested):
self._output_stream.reset_output_stream(out_stream)
self._value_coder.encode_to_stream(value, self._data_output_stream)
self._output_stream.write(self._data_output_stream.get())
self._data_output_stream.clear()
def __repr__(self):
return 'FlinkLengthPrefixCoderBeamWrapper[%s]' % self._value_coder
| 3,466 | 41.802469 | 98 |
py
|
flink
|
flink-master/flink-python/pyflink/fn_execution/beam/beam_coders.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from apache_beam.coders import Coder
from apache_beam.coders.coders import FastCoder, LengthPrefixCoder
from apache_beam.portability import common_urns
from apache_beam.typehints import typehints
from pyflink import fn_execution
from pyflink.fn_execution.coders import LengthPrefixBaseCoder
from pyflink.fn_execution.flink_fn_execution_pb2 import CoderInfoDescriptor
if fn_execution.PYFLINK_CYTHON_ENABLED:
from pyflink.fn_execution.beam import beam_coder_impl_fast as beam_coder_impl
from pyflink.fn_execution.beam.beam_coder_impl_fast import FlinkFieldCoderBeamWrapper
from pyflink.fn_execution.beam.beam_coder_impl_fast import FlinkLengthPrefixCoderBeamWrapper
else:
from pyflink.fn_execution.beam import beam_coder_impl_slow as beam_coder_impl
from pyflink.fn_execution.beam.beam_coder_impl_slow import FlinkFieldCoderBeamWrapper
from pyflink.fn_execution.beam.beam_coder_impl_slow import FlinkLengthPrefixCoderBeamWrapper
FLINK_CODER_URN = "flink:coder:v1"
class PassThroughLengthPrefixCoder(LengthPrefixCoder):
"""
Coder which doesn't prefix the length of the encoded object as the length prefix will be handled
by the wrapped value coder.
"""
def __init__(self, value_coder):
super(PassThroughLengthPrefixCoder, self).__init__(value_coder)
def _create_impl(self):
return beam_coder_impl.PassThroughLengthPrefixCoderImpl(self._value_coder.get_impl())
def __repr__(self):
return 'PassThroughLengthPrefixCoder[%s]' % self._value_coder
Coder.register_structured_urn(
common_urns.coders.LENGTH_PREFIX.urn, PassThroughLengthPrefixCoder)
class FlinkCoder(FastCoder):
def __init__(self, internal_coder):
self._internal_coder = internal_coder
def _create_impl(self):
return self._internal_coder.get_impl()
def get_impl(self):
if isinstance(self._internal_coder, LengthPrefixBaseCoder):
return FlinkLengthPrefixCoderBeamWrapper(self._create_impl())
else:
return FlinkFieldCoderBeamWrapper(self._create_impl())
def to_type_hint(self):
return typehints.Any
@Coder.register_urn(FLINK_CODER_URN, CoderInfoDescriptor)
def _pickle_from_runner_api_parameter(
coder_info_descriptor_proto, unused_components, unused_context):
return FlinkCoder(LengthPrefixBaseCoder.from_coder_info_descriptor_proto(
coder_info_descriptor_proto))
def __repr__(self):
return 'FlinkCoder[%s]' % repr(self._internal_coder)
def __eq__(self, other: 'FlinkCoder'):
return (self.__class__ == other.__class__
and self._internal_coder == other._internal_coder)
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash(self._internal_coder)
| 3,770 | 39.117021 | 100 |
py
|
flink
|
flink-master/flink-python/pyflink/fn_execution/beam/beam_stream_slow.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from apache_beam.coders.coder_impl import create_InputStream, create_OutputStream
from pyflink.fn_execution.stream_slow import InputStream
from pyflink.fn_execution.utils.operation_utils import PeriodicThread
class BeamInputStream(InputStream):
def __init__(self, input_stream: create_InputStream):
super(BeamInputStream, self).__init__([])
self._input_stream = input_stream
def read(self, size):
return self._input_stream.read(size)
def read_byte(self):
return self._input_stream.read_byte()
def size(self):
return self._input_stream.size()
class BeamTimeBasedOutputStream(create_OutputStream):
def __init__(self):
        super(BeamTimeBasedOutputStream, self).__init__()
self._flush_event = False
self._periodic_flusher = PeriodicThread(1, self.notify_flush)
self._periodic_flusher.daemon = True
self._periodic_flusher.start()
self._output_stream = None
def write(self, b: bytes):
self._output_stream.write(b)
def reset_output_stream(self, output_stream: create_OutputStream):
self._output_stream = output_stream
def notify_flush(self):
self._flush_event = True
def close(self):
if self._periodic_flusher:
self._periodic_flusher.cancel()
self._periodic_flusher = None
def maybe_flush(self):
if self._flush_event:
self._output_stream.flush()
self._flush_event = False
else:
self._output_stream.maybe_flush()
| 2,503 | 35.823529 | 81 |
py
|
flink
|
flink-master/flink-python/pyflink/fn_execution/beam/beam_worker_pool_service.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import atexit
import functools
import logging
import os
import sys
import threading
import traceback
import grpc
# In order to remove confusing info messages produced by Beam.
logging.getLogger().setLevel(logging.WARNING)
from apache_beam.options.pipeline_options import DebugOptions
from apache_beam.options.pipeline_options import ProfilingOptions
from apache_beam.portability.api import beam_fn_api_pb2
from apache_beam.portability.api import beam_fn_api_pb2_grpc
from apache_beam.portability.api import endpoints_pb2
from apache_beam.portability.api.org.apache.beam.model.fn_execution.v1.beam_provision_api_pb2 \
import GetProvisionInfoRequest
from apache_beam.portability.api.org.apache.beam.model.fn_execution.v1.beam_provision_api_pb2_grpc \
import ProvisionServiceStub
from apache_beam.runners.worker import sdk_worker_main
from apache_beam.runners.worker.log_handler import FnApiLogRecordHandler
from apache_beam.runners.worker.sdk_worker import SdkHarness
from apache_beam.utils import thread_pool_executor, profiler
from google.protobuf import json_format
from pyflink.fn_execution.beam import beam_sdk_worker_main # noqa # pylint: disable=unused-import
_LOGGER = logging.getLogger(__name__)
class BeamFnLoopbackWorkerPoolServicer(beam_fn_api_pb2_grpc.BeamFnExternalWorkerPoolServicer):
"""
Worker pool entry point.
The worker pool exposes an RPC service that is used in MiniCluster to start and stop the Python
SDK workers.
    The worker pool uses child threads for parallelism.
"""
_instance = None
def __new__(cls):
if cls._instance is None:
cls._instance = object.__new__(cls)
return cls._instance
def __init__(self):
self._parse_param_lock = threading.Lock()
self._worker_address = None
self._old_working_dir = None
self._old_python_path = None
self._ref_cnt = 0
def start(self):
if not self._worker_address:
worker_server = grpc.server(
thread_pool_executor.shared_unbounded_instance())
worker_address = 'localhost:%s' % worker_server.add_insecure_port('[::]:0')
beam_fn_api_pb2_grpc.add_BeamFnExternalWorkerPoolServicer_to_server(self, worker_server)
worker_server.start()
self._worker_address = worker_address
atexit.register(functools.partial(worker_server.stop, 1))
return self._worker_address
def StartWorker(self,
start_worker_request: beam_fn_api_pb2.StartWorkerRequest,
unused_context):
try:
self._start_sdk_worker_main(start_worker_request)
return beam_fn_api_pb2.StartWorkerResponse()
except Exception:
return beam_fn_api_pb2.StartWorkerResponse(error=traceback.format_exc())
def StopWorker(self,
stop_worker_request: beam_fn_api_pb2.StopWorkerRequest,
unused_context):
pass
def _start_sdk_worker_main(self, start_worker_request: beam_fn_api_pb2.StartWorkerRequest):
params = start_worker_request.params
self._parse_param_lock.acquire()
        # The first thread to start is responsible for preparing the execution environment.
if not self._ref_cnt:
if 'PYTHONPATH' in params:
self._old_python_path = sys.path[:]
python_path_list = params['PYTHONPATH'].split(':')
python_path_list.reverse()
for path in python_path_list:
sys.path.insert(0, path)
if '_PYTHON_WORKING_DIR' in params:
self._old_working_dir = os.getcwd()
os.chdir(params['_PYTHON_WORKING_DIR'])
os.environ.update(params)
self._ref_cnt += 1
self._parse_param_lock.release()
# read job information from provision stub
metadata = [("worker_id", start_worker_request.worker_id)]
provision_endpoint = start_worker_request.provision_endpoint.url
with grpc.insecure_channel(provision_endpoint) as channel:
client = ProvisionServiceStub(channel=channel)
info = client.GetProvisionInfo(GetProvisionInfoRequest(), metadata=metadata).info
options = json_format.MessageToJson(info.pipeline_options)
logging_endpoint = info.logging_endpoint.url
control_endpoint = info.control_endpoint.url
try:
logging_service_descriptor = endpoints_pb2.ApiServiceDescriptor(url=logging_endpoint)
# Send all logs to the runner.
fn_log_handler = FnApiLogRecordHandler(logging_service_descriptor)
logging.getLogger().setLevel(logging.INFO)
# Remove all the built-in log handles
logging.getLogger().handlers = []
logging.getLogger().addHandler(fn_log_handler)
logging.info("Starting up Python worker in loopback mode.")
except Exception:
_LOGGER.error(
"Failed to set up logging handler, continuing without.",
exc_info=True)
fn_log_handler = None
sdk_pipeline_options = sdk_worker_main._parse_pipeline_options(options)
_worker_id = start_worker_request.worker_id
try:
control_service_descriptor = endpoints_pb2.ApiServiceDescriptor(url=control_endpoint)
status_service_descriptor = endpoints_pb2.ApiServiceDescriptor()
experiments = sdk_pipeline_options.view_as(DebugOptions).experiments or []
enable_heap_dump = 'enable_heap_dump' in experiments
SdkHarness(
control_address=control_service_descriptor.url,
status_address=status_service_descriptor.url,
worker_id=_worker_id,
state_cache_size=sdk_worker_main._get_state_cache_size(experiments),
data_buffer_time_limit_ms=sdk_worker_main._get_data_buffer_time_limit_ms(
experiments),
profiler_factory=profiler.Profile.factory_from_options(
sdk_pipeline_options.view_as(ProfilingOptions)),
enable_heap_dump=enable_heap_dump).run()
except: # pylint: disable=broad-except
_LOGGER.exception('Python sdk harness failed: ')
raise
finally:
self._parse_param_lock.acquire()
self._ref_cnt -= 1
# The last thread to exit is responsible for reverting working directory and sys.path.
if self._ref_cnt == 0:
if self._old_python_path is not None:
sys.path.clear()
for item in self._old_python_path:
sys.path.append(item)
self._old_python_path = None
if self._old_working_dir is not None:
os.chdir(self._old_working_dir)
self._old_working_dir = None
self._parse_param_lock.release()
if fn_log_handler:
fn_log_handler.close()
| 8,030 | 42.646739 | 100 |
py
|
flink
|
flink-master/flink-python/pyflink/fn_execution/beam/beam_operations_slow.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import abc
from abc import abstractmethod
from typing import Iterable, Any, Dict, List
from apache_beam.runners.worker.bundle_processor import TimerInfo, DataOutputOperation
from apache_beam.runners.worker.operations import Operation
from apache_beam.utils import windowed_value
from apache_beam.utils.windowed_value import WindowedValue
from pyflink.common.constants import DEFAULT_OUTPUT_TAG
from pyflink.fn_execution.flink_fn_execution_pb2 import UserDefinedDataStreamFunction
from pyflink.fn_execution.table.operations import BundleOperation
from pyflink.fn_execution.profiler import Profiler
class OutputProcessor(abc.ABC):
@abstractmethod
def process_outputs(self, windowed_value: WindowedValue, results: Iterable[Any]):
pass
def close(self):
pass
class NetworkOutputProcessor(OutputProcessor):
def __init__(self, consumer):
assert isinstance(consumer, DataOutputOperation)
self._consumer = consumer
self._value_coder_impl = consumer.windowed_coder.wrapped_value_coder.get_impl()._value_coder
def process_outputs(self, windowed_value: WindowedValue, results: Iterable[Any]):
output_stream = self._consumer.output_stream
self._value_coder_impl.encode_to_stream(results, output_stream, True)
self._value_coder_impl._output_stream.maybe_flush()
def close(self):
self._value_coder_impl._output_stream.close()
class IntermediateOutputProcessor(OutputProcessor):
def __init__(self, consumer):
self._consumer = consumer
def process_outputs(self, windowed_value: WindowedValue, results: Iterable[Any]):
self._consumer.process(windowed_value.with_value(results))
class FunctionOperation(Operation):
"""
Base class of function operation that will execute StatelessFunction or StatefulFunction for
each input element.
"""
def __init__(self, name, spec, counter_factory, sampler, consumers, operation_cls,
operator_state_backend):
super(FunctionOperation, self).__init__(name, spec, counter_factory, sampler)
self._output_processors = self._create_output_processors(
consumers
) # type: Dict[str, List[OutputProcessor]]
self.operation_cls = operation_cls
self.operator_state_backend = operator_state_backend
self.operation = self.generate_operation()
self.process_element = self.operation.process_element
self.operation.open()
if spec.serialized_fn.profile_enabled:
self._profiler = Profiler()
else:
self._profiler = None
if isinstance(spec.serialized_fn, UserDefinedDataStreamFunction):
self._has_side_output = spec.serialized_fn.has_side_output
else:
# it doesn't support side output in Table API & SQL
self._has_side_output = False
if not self._has_side_output:
self._main_output_processor = self._output_processors[DEFAULT_OUTPUT_TAG][0]
def setup(self):
super(FunctionOperation, self).setup()
def start(self):
with self.scoped_start_state:
super(FunctionOperation, self).start()
if self._profiler:
self._profiler.start()
def finish(self):
with self.scoped_finish_state:
super(FunctionOperation, self).finish()
self.operation.finish()
if self._profiler:
self._profiler.close()
def needs_finalization(self):
return False
def reset(self):
super(FunctionOperation, self).reset()
def teardown(self):
with self.scoped_finish_state:
self.operation.close()
for processors in self._output_processors.values():
for p in processors:
p.close()
def progress_metrics(self):
metrics = super(FunctionOperation, self).progress_metrics()
metrics.processed_elements.measured.output_element_counts.clear()
tag = None
receiver = self.receivers[0]
metrics.processed_elements.measured.output_element_counts[
str(tag)
] = receiver.opcounter.element_counter.value()
return metrics
def process(self, o: WindowedValue):
with self.scoped_process_state:
if self._has_side_output:
for value in o.value:
for tag, row in self.process_element(value):
for p in self._output_processors.get(tag, []):
p.process_outputs(o, [row])
else:
if isinstance(self.operation, BundleOperation):
for value in o.value:
self.process_element(value)
self._main_output_processor.process_outputs(o, self.operation.finish_bundle())
else:
for value in o.value:
self._main_output_processor.process_outputs(
o, self.operation.process_element(value)
)
def monitoring_infos(self, transform_id, tag_to_pcollection_id):
"""
        Only pass user metrics to Java.
        :param tag_to_pcollection_id: not used for user metrics
"""
return super().user_monitoring_infos(transform_id)
@staticmethod
def _create_output_processors(consumers_map):
def _create_processor(consumer):
if isinstance(consumer, DataOutputOperation):
return NetworkOutputProcessor(consumer)
else:
return IntermediateOutputProcessor(consumer)
return {
tag: [_create_processor(c) for c in consumers]
for tag, consumers in consumers_map.items()
}
@abstractmethod
def generate_operation(self):
pass
class StatelessFunctionOperation(FunctionOperation):
def __init__(self, name, spec, counter_factory, sampler, consumers, operation_cls,
operator_state_backend):
super(StatelessFunctionOperation, self).__init__(
name, spec, counter_factory, sampler, consumers, operation_cls, operator_state_backend
)
def generate_operation(self):
if self.operator_state_backend is not None:
return self.operation_cls(self.spec.serialized_fn, self.operator_state_backend)
else:
return self.operation_cls(self.spec.serialized_fn)
class StatefulFunctionOperation(FunctionOperation):
def __init__(self, name, spec, counter_factory, sampler, consumers, operation_cls,
keyed_state_backend, operator_state_backend):
self._keyed_state_backend = keyed_state_backend
self._reusable_windowed_value = windowed_value.create(None, -1, None, None)
super(StatefulFunctionOperation, self).__init__(
name, spec, counter_factory, sampler, consumers, operation_cls, operator_state_backend
)
def generate_operation(self):
if self.operator_state_backend is not None:
return self.operation_cls(self.spec.serialized_fn, self._keyed_state_backend,
self.operator_state_backend)
else:
return self.operation_cls(self.spec.serialized_fn, self._keyed_state_backend)
def add_timer_info(self, timer_family_id: str, timer_info: TimerInfo):
# ignore timer_family_id
self.operation.add_timer_info(timer_info)
def process_timer(self, tag, timer_data):
if self._has_side_output:
# the field user_key holds the timer data
for tag, row in self.operation.process_timer(timer_data.user_key):
for p in self._output_processors.get(tag, []):
p.process_outputs(self._reusable_windowed_value, [row])
else:
self._main_output_processor.process_outputs(
self._reusable_windowed_value,
# the field user_key holds the timer data
self.operation.process_timer(timer_data.user_key),
)
| 9,035 | 38.982301 | 100 |
py
|
flink
|
flink-master/flink-python/pyflink/fn_execution/beam/__init__.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
| 958 | 52.277778 | 80 |
py
|
flink
|
flink-master/flink-python/pyflink/fn_execution/beam/beam_boot.py
|
#!/usr/bin/env python
#################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
"""
This script is a Python implementation of the "boot.go" script in the "beam-sdks-python-container"
project of Apache Beam, see:
https://github.com/apache/beam/blob/release-2.14.0/sdks/python/container/boot.go
The original is implemented in Go and would introduce unnecessary dependencies when used in a pure
Python project, so we provide this Python implementation, which is used when the Python worker runs
in process mode. It downloads and installs the user's Python artifacts, then launches the Python SDK
harness of Apache Beam.
"""
import argparse
import logging
import os
import sys
import grpc
from apache_beam.portability.api.org.apache.beam.model.fn_execution.v1.beam_fn_api_pb2 import \
StartWorkerRequest
from apache_beam.portability.api.org.apache.beam.model.fn_execution.v1.beam_fn_api_pb2_grpc import (
BeamFnExternalWorkerPoolStub)
from apache_beam.portability.api.org.apache.beam.model.fn_execution.v1.beam_provision_api_pb2 \
import GetProvisionInfoRequest
from apache_beam.portability.api.org.apache.beam.model.fn_execution.v1.beam_provision_api_pb2_grpc \
import ProvisionServiceStub
from apache_beam.portability.api.org.apache.beam.model.pipeline.v1.endpoints_pb2 import (
ApiServiceDescriptor)
from google.protobuf import json_format, text_format
def check_not_empty(check_str, error_message):
if check_str == "":
logging.fatal(error_message)
exit(1)
python_exec = sys.executable
if __name__ == "__main__":
# print INFO and higher level messages
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
parser = argparse.ArgumentParser()
parser.add_argument("--id", default="", help="Local identifier (required).")
parser.add_argument("--provision_endpoint", default="",
help="Provision endpoint (required).")
parser.add_argument("--semi_persist_dir", default="/tmp",
help="Local semi-persistent directory (optional).")
args = parser.parse_known_args()[0]
worker_id = args.id
provision_endpoint = args.provision_endpoint
semi_persist_dir = args.semi_persist_dir
check_not_empty(worker_id, "No id provided.")
check_not_empty(provision_endpoint, "No provision endpoint provided.")
logging.info("Initializing Python harness: %s" % " ".join(sys.argv))
if 'PYTHON_LOOPBACK_SERVER_ADDRESS' in os.environ:
logging.info("Starting up Python harness in loopback mode.")
params = dict(os.environ)
params.update({'SEMI_PERSISTENT_DIRECTORY': semi_persist_dir})
with grpc.insecure_channel(os.environ['PYTHON_LOOPBACK_SERVER_ADDRESS']) as channel:
client = BeamFnExternalWorkerPoolStub(channel=channel)
request = StartWorkerRequest(
worker_id=worker_id,
provision_endpoint=ApiServiceDescriptor(url=provision_endpoint),
params=params)
response = client.StartWorker(request)
if response.error:
raise RuntimeError("Error starting worker: %s" % response.error)
else:
logging.info("Starting up Python harness in a standalone process.")
metadata = [("worker_id", worker_id)]
# read job information from provision stub
with grpc.insecure_channel(provision_endpoint) as channel:
client = ProvisionServiceStub(channel=channel)
info = client.GetProvisionInfo(GetProvisionInfoRequest(), metadata=metadata).info
options = json_format.MessageToJson(info.pipeline_options)
logging_endpoint = info.logging_endpoint.url
control_endpoint = info.control_endpoint.url
os.environ["WORKER_ID"] = worker_id
os.environ["PIPELINE_OPTIONS"] = options
os.environ["SEMI_PERSISTENT_DIRECTORY"] = semi_persist_dir
os.environ["LOGGING_API_SERVICE_DESCRIPTOR"] = text_format.MessageToString(
ApiServiceDescriptor(url=logging_endpoint))
os.environ["CONTROL_API_SERVICE_DESCRIPTOR"] = text_format.MessageToString(
ApiServiceDescriptor(url=control_endpoint))
env = dict(os.environ)
if "FLINK_BOOT_TESTING" in os.environ and os.environ["FLINK_BOOT_TESTING"] == "1":
logging.info("Shut down Python harness due to FLINK_BOOT_TESTING is set.")
exit(0)
from pyflink.fn_execution.beam import beam_sdk_worker_main
beam_sdk_worker_main.main()
| 5,403 | 42.580645 | 100 |
py
|
flink
|
flink-master/flink-python/pyflink/fn_execution/beam/beam_operations.py
|
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from apache_beam.portability import common_urns
from apache_beam.portability.api import beam_runner_api_pb2
from apache_beam.runners import common
from apache_beam.runners.worker import bundle_processor, operation_specs
from apache_beam.utils import proto_utils
from pyflink import fn_execution
if fn_execution.PYFLINK_CYTHON_ENABLED:
import pyflink.fn_execution.beam.beam_operations_fast as beam_operations
else:
import pyflink.fn_execution.beam.beam_operations_slow as beam_operations
from pyflink.fn_execution import flink_fn_execution_pb2
from pyflink.fn_execution.coders import from_proto, from_type_info_proto, TimeWindowCoder, \
CountWindowCoder, FlattenRowCoder
from pyflink.fn_execution.state_impl import RemoteKeyedStateBackend, RemoteOperatorStateBackend
import pyflink.fn_execution.datastream.operations as datastream_operations
from pyflink.fn_execution.datastream.process import operations
import pyflink.fn_execution.table.operations as table_operations
# ----------------- UDF --------------------
@bundle_processor.BeamTransformFactory.register_urn(
table_operations.SCALAR_FUNCTION_URN, flink_fn_execution_pb2.UserDefinedFunctions)
def create_scalar_function(factory, transform_id, transform_proto, parameter, consumers):
return _create_user_defined_function_operation(
factory, transform_proto, consumers, parameter,
beam_operations.StatelessFunctionOperation,
table_operations.ScalarFunctionOperation)
# ----------------- UDTF --------------------
@bundle_processor.BeamTransformFactory.register_urn(
table_operations.TABLE_FUNCTION_URN, flink_fn_execution_pb2.UserDefinedFunctions)
def create_table_function(factory, transform_id, transform_proto, parameter, consumers):
return _create_user_defined_function_operation(
factory, transform_proto, consumers, parameter,
beam_operations.StatelessFunctionOperation,
table_operations.TableFunctionOperation)
# ----------------- UDAF --------------------
@bundle_processor.BeamTransformFactory.register_urn(
table_operations.STREAM_GROUP_AGGREGATE_URN,
flink_fn_execution_pb2.UserDefinedAggregateFunctions)
def create_aggregate_function(factory, transform_id, transform_proto, parameter, consumers):
return _create_user_defined_function_operation(
factory, transform_proto, consumers, parameter,
beam_operations.StatefulFunctionOperation,
table_operations.StreamGroupAggregateOperation)
@bundle_processor.BeamTransformFactory.register_urn(
table_operations.STREAM_GROUP_TABLE_AGGREGATE_URN,
flink_fn_execution_pb2.UserDefinedAggregateFunctions)
def create_table_aggregate_function(factory, transform_id, transform_proto, parameter, consumers):
return _create_user_defined_function_operation(
factory, transform_proto, consumers, parameter,
beam_operations.StatefulFunctionOperation,
table_operations.StreamGroupTableAggregateOperation)
@bundle_processor.BeamTransformFactory.register_urn(
table_operations.STREAM_GROUP_WINDOW_AGGREGATE_URN,
flink_fn_execution_pb2.UserDefinedAggregateFunctions)
def create_group_window_aggregate_function(factory, transform_id, transform_proto, parameter,
consumers):
return _create_user_defined_function_operation(
factory, transform_proto, consumers, parameter,
beam_operations.StatefulFunctionOperation,
table_operations.StreamGroupWindowAggregateOperation)
# ----------------- Pandas UDAF --------------------
@bundle_processor.BeamTransformFactory.register_urn(
table_operations.PANDAS_AGGREGATE_FUNCTION_URN, flink_fn_execution_pb2.UserDefinedFunctions)
def create_pandas_aggregate_function(factory, transform_id, transform_proto, parameter, consumers):
return _create_user_defined_function_operation(
factory, transform_proto, consumers, parameter,
beam_operations.StatelessFunctionOperation,
table_operations.PandasAggregateFunctionOperation)
@bundle_processor.BeamTransformFactory.register_urn(
table_operations.PANDAS_BATCH_OVER_WINDOW_AGGREGATE_FUNCTION_URN,
flink_fn_execution_pb2.UserDefinedFunctions)
def create_pandas_over_window_aggregate_function(
factory, transform_id, transform_proto, parameter, consumers):
return _create_user_defined_function_operation(
factory, transform_proto, consumers, parameter,
beam_operations.StatelessFunctionOperation,
table_operations.PandasBatchOverWindowAggregateFunctionOperation)
# ----------------- DataStream --------------------
@bundle_processor.BeamTransformFactory.register_urn(
common_urns.primitives.PAR_DO.urn, beam_runner_api_pb2.ParDoPayload)
def create_data_stream_keyed_process_function(factory, transform_id, transform_proto, parameter,
consumers):
urn = parameter.do_fn.urn
payload = proto_utils.parse_Bytes(
parameter.do_fn.payload, flink_fn_execution_pb2.UserDefinedDataStreamFunction)
if urn == datastream_operations.DATA_STREAM_STATELESS_FUNCTION_URN:
return _create_user_defined_function_operation(
factory, transform_proto, consumers, payload,
beam_operations.StatelessFunctionOperation,
operations.StatelessOperation)
else:
return _create_user_defined_function_operation(
factory, transform_proto, consumers, payload,
beam_operations.StatefulFunctionOperation,
operations.StatefulOperation)
# ----------------- Utilities --------------------
def _create_user_defined_function_operation(factory, transform_proto, consumers, udfs_proto,
beam_operation_cls, internal_operation_cls):
output_tags = list(transform_proto.outputs.keys())
output_coders = factory.get_output_coders(transform_proto)
spec = operation_specs.WorkerDoFn(
serialized_fn=udfs_proto,
output_tags=output_tags,
input=None,
side_inputs=None,
output_coders=[output_coders[tag] for tag in output_tags])
name = common.NameContext(transform_proto.unique_name)
serialized_fn = spec.serialized_fn
if isinstance(serialized_fn, flink_fn_execution_pb2.UserDefinedDataStreamFunction):
operator_state_backend = RemoteOperatorStateBackend(
factory.state_handler,
serialized_fn.state_cache_size,
serialized_fn.map_state_read_cache_size,
serialized_fn.map_state_write_cache_size,
)
else:
operator_state_backend = None
if hasattr(serialized_fn, "key_type"):
# keyed operation, need to create the KeyedStateBackend.
row_schema = serialized_fn.key_type.row_schema
key_row_coder = FlattenRowCoder([from_proto(f.type) for f in row_schema.fields])
if serialized_fn.HasField('group_window'):
if serialized_fn.group_window.is_time_window:
window_coder = TimeWindowCoder()
else:
window_coder = CountWindowCoder()
else:
window_coder = None
keyed_state_backend = RemoteKeyedStateBackend(
factory.state_handler,
key_row_coder,
window_coder,
serialized_fn.state_cache_size,
serialized_fn.map_state_read_cache_size,
serialized_fn.map_state_write_cache_size)
return beam_operation_cls(
name,
spec,
factory.counter_factory,
factory.state_sampler,
consumers,
internal_operation_cls,
keyed_state_backend,
operator_state_backend,
)
elif internal_operation_cls == operations.StatefulOperation:
key_row_coder = from_type_info_proto(serialized_fn.key_type_info)
keyed_state_backend = RemoteKeyedStateBackend(
factory.state_handler,
key_row_coder,
None,
serialized_fn.state_cache_size,
serialized_fn.map_state_read_cache_size,
serialized_fn.map_state_write_cache_size)
return beam_operation_cls(
name,
spec,
factory.counter_factory,
factory.state_sampler,
consumers,
internal_operation_cls,
keyed_state_backend,
operator_state_backend,
)
else:
return beam_operation_cls(
name,
spec,
factory.counter_factory,
factory.state_sampler,
consumers,
internal_operation_cls,
operator_state_backend,
)
| 9,647 | 41.502203 | 99 |
py
|
Robust-Training-for-Time-Series
|
Robust-Training-for-Time-Series-main/CNNmodel.py
|
import sys
import os
import warnings
import numpy as np
import tensorflow as tf
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
import pickle as pkl
from GAK import tf_gak
def clip_tensor(X, eps, norm=np.inf):
if norm not in [np.inf, 2]:
raise ValueError('Inadequate norm')
axis = list(range(1, len(X.get_shape())))
avoid_zero_div = 1e-12
if norm == np.inf:
X = tf.clip_by_value(X, -eps, eps)
elif norm == 2:
norm = tf.sqrt(tf.maximum(avoid_zero_div, tf.reduce_sum(tf.square(X), axis, keepdims=True)))
factor = tf.minimum(1., tf.math.divide(eps, norm))
X = X * factor
return X
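# Usage note (illustrative; not from the original code): clip_tensor projects a batch of
# perturbations onto an eps-ball, e.g. clip_tensor(delta, eps=0.1) clamps every entry of
# delta to [-0.1, 0.1], while clip_tensor(delta, eps=0.1, norm=2) rescales each sample of
# delta so that its L2 norm is at most 0.1.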
def dtw_differntiable(path, x, y, tf_norm=2):
"""
    Turn a fixed DTW alignment path into a differentiable distance between x and y.
"""
x_path = tf.convert_to_tensor(path[0])
y_path = tf.convert_to_tensor(path[1])
if len(x_path) != len(y_path):
raise ValueError("Error in DTW path length")
else:
dtw_dist = tf.norm(x[x_path[0]] - y[y_path[0]], ord=tf_norm)
for i in range(1, len(x_path)):
dtw_dist = tf.add(dtw_dist, tf.norm(x[x_path[i]] - y[y_path[i]], ord=tf_norm))
return dtw_dist
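# Usage note (illustrative; not part of the original code): `path` is a pair of
# equal-length index sequences (x_indices, y_indices) describing a warping alignment,
# e.g. obtained from an external DTW solver. For two series of equal length T, the
# identity alignment (list(range(T)), list(range(T))) reduces dtw_differntiable to the
# sum of point-wise norms ||x[i] - y[i]||.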
#CNN Architecture
class cnn_class():
def __init__(self, name, seg_size, channel_nb, class_nb, arch='1'):
self.name = name
self.seg_size = seg_size
self.channel_nb = channel_nb
self.class_nb = class_nb
self.x_holder = []
self.y_holder = []
self.y_ =[]
if arch=='0':
self.trunk_model = tf.keras.Sequential([
#Layers
tf.keras.layers.Conv2D(20,[1, 12], padding="same", input_shape=(1, self.seg_size, self.channel_nb)),
tf.keras.layers.MaxPooling2D((1, 2), strides=2),
#Fully connected layer
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(512, activation=tf.nn.relu),
tf.keras.layers.Dropout(0.2),
tf.keras.layers.BatchNormalization(),
#Logits layer
tf.keras.layers.Dense(self.class_nb)
])
if arch=='1':
self.trunk_model = tf.keras.Sequential([
#Layers
tf.keras.layers.Conv2D(66,[1, 12], padding="same", input_shape=(1, self.seg_size, self.channel_nb)),
tf.keras.layers.MaxPooling2D((1, 4), strides=4),
#Fully connected layer
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(1024, activation=tf.nn.relu),
tf.keras.layers.Dropout(0.15),
tf.keras.layers.BatchNormalization(),
#Logits layer
tf.keras.layers.Dense(self.class_nb)
])
elif arch=='2':
self.trunk_model = tf.keras.Sequential([
#Layers
tf.keras.layers.Conv2D(100,[1, 12], padding="same", input_shape=(1, self.seg_size, self.channel_nb)),
tf.keras.layers.MaxPooling2D((1, 4), strides=1),
tf.keras.layers.Conv2D(50,[1, 5], padding="same", input_shape=(1, self.seg_size, self.channel_nb)),
tf.keras.layers.MaxPooling2D((1, 4), strides=1),
tf.keras.layers.Conv2D(50,[1, 3], padding="same", input_shape=(1, self.seg_size, self.channel_nb)),
tf.keras.layers.MaxPooling2D((1, 2), strides=1),
#Fully connected layer
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(200, activation=tf.nn.relu),
tf.keras.layers.Dense(100, activation=tf.nn.relu),
tf.keras.layers.Dropout(0.2),
tf.keras.layers.BatchNormalization(),
#Logits layer
tf.keras.layers.Dense(self.class_nb)
])
elif arch=='3':
self.trunk_model = tf.keras.Sequential([
#Layers
tf.keras.layers.Conv2D(100,[1, 12], padding="same", input_shape=(1, self.seg_size, self.channel_nb)),
tf.keras.layers.MaxPooling2D((1, 4), strides=1),
tf.keras.layers.Conv2D(50,[1, 6], padding="same", input_shape=(1, self.seg_size, self.channel_nb)),
tf.keras.layers.MaxPooling2D((1, 4), strides=1),
tf.keras.layers.Conv2D(25,[1, 3], padding="same", input_shape=(1, self.seg_size, self.channel_nb)),
tf.keras.layers.MaxPooling2D((1, 2), strides=1),
#Fully connected layer
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(100, activation=tf.nn.relu),
tf.keras.layers.Dense(50, activation=tf.nn.relu),
tf.keras.layers.Dropout(0.15),
tf.keras.layers.BatchNormalization(),
#Logits layer
tf.keras.layers.Dense(self.class_nb)
])
self.model = tf.keras.Sequential([self.trunk_model,
tf.keras.layers.Softmax()])
#Training Functions
self.loss_fn = tf.keras.losses.SparseCategoricalCrossentropy()
self.optimizer = tf.keras.optimizers.Adam(1e-3)
def train(self, train_set, checkpoint_path="TrainingRes/model_target", epochs=10, new_train=False):
@tf.function
def train_step(X, y):
with tf.GradientTape() as tape:
pred = self.model(X, training=True)
pred_loss = self.loss_fn(y, pred)
total_loss = pred_loss
gradients = tape.gradient(total_loss, self.model.trainable_variables)
self.optimizer.apply_gradients(zip(gradients, self.model.trainable_variables))
if not new_train:
self.model.load_weights(checkpoint_path)
sys.stdout.write("\nWeights loaded!")
else:
for ep in range(epochs):
sys.stdout.write("\r{}: Epochs {}/{} . . .".format(self.name, ep+1, epochs))
sys.stdout.flush()
for X, y in train_set:
train_step(X, y)
self.model.save_weights(checkpoint_path)
sys.stdout.write("\n")
def rots_train(self, train_set, a_shape, K, checkpoint_path="TrainingRes/rots_model",
gamma_gak=1, gak_sampled_paths=100, path_limit=100, gak_random_kill=5,
lbda=1.0, gamma_k=1, eta_k=1e-2, beta=5e-2, a_init=1e-2, omega=1e-3,
X_valid=[], y_valid=[],
uses_L2=False, new_train=False, verbose=False):
model_path = checkpoint_path+'/'+self.name
        # Note: currently unused helper (its call in the training loop below is commented
        # out); it relies on an `nb_batches` variable that is never defined here, so it
        # would fail if re-enabled.
        def sample_function(input_data):
rand_batch = np.random.randint(0, nb_batches)
i=0
warnings.warn("\nSample function details: nb_batches:{} - rand_batch:{}".format(nb_batches, rand_batch))
for X, y in input_data:
warnings.warn("\nSample function In-Loop: i:{} - X:{}".format(i, X))
if i==rand_batch:
return X, y
i += 1
def dist_func(x1, x2, use_log=True, path_limit=path_limit):
if use_log:
return -tf.math.log(tf_gak(x1, x2, gamma_gak, path_limit=path_limit, random_kill=gak_random_kill))
else:
return tf_gak(x1, x2, gamma_gak, path_limit=path_limit, random_kill=gak_random_kill)
@tf.function
def GW_ro_train_step(X, y, a, lbda):
with tf.GradientTape() as tape1:
pred = self.model(tf.add(X, a), training=True)
loss_it = self.loss_fn(y, pred)
G_w = tape1.gradient(loss_it, self.model.trainable_variables)
if verbose: sys.stdout.write("\n---Current Loss_w:", loss_it)
return G_w
@tf.function
def Ga_ro_train_step(X, y, a, lbda):
with tf.GradientTape() as tape2:
tape2.watch(a)
D_nl = dist_func(X, tf.add(X, a), use_log=False, path_limit=path_limit) #D no log = h_ij
self.omega = tf.add(tf.multiply(tf.subtract(tf.cast(1, dtype=tf.float64),beta), self.omega), tf.multiply(beta, D_nl))#line 8
G_omega = tape2.gradient(-tf.math.log(self.omega), a)
with tf.GradientTape() as tape2:
tape2.watch(a)
pred_a = self.model(tf.add(X, a), training=True)
loss_it_a = tf.cast(self.loss_fn(y, pred_a), tf.float64)
G_a_pred = tape2.gradient(loss_it_a, a)
G_a = tf.add(G_a_pred, tf.multiply(lbda, tf.add(G_omega,a)))
return G_a
@tf.function
def Ga_euclidean(X, y, a, lbda):
with tf.GradientTape() as tape2:
tape2.watch(a)
pred = self.model(tf.add(X, a), training=True)
loss_it = tf.cast(self.loss_fn(y, pred), tf.float64)
D = tf.norm(a, ord='euclidean')
loss_a = tf.add(loss_it, tf.multiply(lbda, D))
grad_a = tape2.gradient(loss_a, a)
G_a = tf.add(grad_a, tf.multiply(lbda, a))
return G_a
def rots_train_step(X, y):
G_w = GW_ro_train_step(X, y, self.a, self.lbda) #Get the gradient
self.ro_optimizer.apply_gradients(zip(G_w, self.model.trainable_variables)) #line 6
if not uses_L2: #line 7
G_a = Ga_ro_train_step(X, y, self.a, self.lbda)
else:
G_a = Ga_euclidean(X, y, self.a, self.lbda)
self.a = tf.add(self.a, tf.multiply(self.gamma_k_value, G_a)) #line 13
if not new_train:
self.model.load_weights(model_path)
sys.stdout.write("\nWeights loaded!")
else:
self.omega = omega
#decaying l_r of eta_k
boundaries = list(np.arange(np.ceil(K/4),K, 1e-2*K))
values = [eta_k]
for i, _ in enumerate(boundaries):
values.append(eta_k/(2**(i+1)))
lr_schedule_fn = tf.keras.optimizers.schedules.PiecewiseConstantDecay(boundaries, values)
self.ro_optimizer = tf.keras.optimizers.SGD(learning_rate=lr_schedule_fn(tf.Variable(0)))
#decaying l_r of gamma_k
gamma_k_init = gamma_k
gamma_k_decay = [gamma_k_init]
for i in range(1,K):
if i%10==0:
gamma_k_init /= 10
gamma_k_decay.append(gamma_k_init)
gamma_k_decay = tf.convert_to_tensor(gamma_k_decay, dtype=tf.float64)
sys.stdout.write("\nROTS training ...")
self.a = a_init * tf.ones(a_shape, dtype=tf.float64)
beta= tf.Variable(beta, dtype=tf.float64)
self.lbda = tf.cast(lbda, dtype=tf.float64)
min_loss = np.inf
k = 0
for X, y in train_set:
k += 1
if k%10==1:sys.stdout.write("\nK={}/{}".format(k, K))
sys.stdout.flush()
#X, y = sample_function(train_set) #line 4
self.gamma_k_value = gamma_k_decay[k]
rots_train_step(X, y)
sys.stdout.flush()
self.model.save_weights(model_path)
### Save best weights
if len(X_valid)>0:
pred_t = self.model(X_valid)
loss_t = self.loss_fn(y_valid, pred_t)
if loss_t <= min_loss:
best_W_T = self.ro_optimizer.get_weights()
if verbose: sys.stdout.write("\nBest weight validatio score: {:.2f}".format(self.score(X_valid, y_valid)))
min_loss = loss_t
sys.stdout.write(" . . . Validation Score: {:.2f}\n".format((self.score(X_valid, y_valid))))
sys.stdout.flush()
if len(X_valid)>0:
self.ro_optimizer.set_weights(best_W_T)
self.model.save_weights(model_path)
print()
def predict(self, X):
return tf.argmax(self.model(X, training=False), 1)
def predict_stmax(self, X):
return self.trunk_model(X, training=False)
def score(self, X, y):
X = tf.cast(X, tf.float64)
acc = tf.keras.metrics.Accuracy()
acc.reset_states()
pred = self.predict(X)
acc.update_state(pred, y)
return acc.result().numpy()
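# A minimal usage sketch of cnn_class (hypothetical dimensions and data; not part of
# the original project): build one of the architectures above and score the untrained
# network on random inputs just to check that the shapes flow end to end.
if __name__ == "__main__":
    demo_model = cnn_class("demo", seg_size=60, channel_nb=1, class_nb=3, arch='1')
    X_demo = np.random.randn(8, 1, 60, 1)       # (batch, 1, seg_size, channel_nb)
    y_demo = np.random.randint(0, 3, size=8)    # integer class labels
    sys.stdout.write("untrained accuracy: {:.2f}\n".format(demo_model.score(X_demo, y_demo)))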
| 12,895 | 42.275168 | 140 |
py
|
Robust-Training-for-Time-Series
|
Robust-Training-for-Time-Series-main/hyparamtuning.py
|
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 7 09:58:45 2020
@author: BkTaha
"""
import json
import pickle as pkl
import numpy as np
from sklearn.metrics import pairwise_distances
def get_hyparams(dataset_name):
json_param = "datasets_parameters.json"
with open(json_param) as jf:
info = json.load(jf)
d = info[dataset_name]
path = d['path']
X_train, y_train, X_test, y_test = pkl.load(open(path, 'rb'))
#Using median
D = pairwise_distances(X_train[:,0,:,0], X_train[:,0,:,0], metric='seuclidean')
D_T = []
for i in range(D.shape[0]):
for j in range(i+1, D.shape[1]):
D_T.append(D[i,j])
gamma = np.median(D_T)
lbda = 10**-(np.ceil(np.log(gamma)/np.log(10))-2) #start value
return gamma, lbda
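# A minimal sketch of the median heuristic used above (hypothetical array shapes; not
# part of the original project): gamma is the median pairwise standardized Euclidean
# distance over the training series, and the starting lambda is set from gamma's order
# of magnitude via 10**-(ceil(log10(gamma)) - 2).
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    X_demo = rng.normal(size=(20, 60))  # 20 univariate series of length 60
    D_demo = pairwise_distances(X_demo, X_demo, metric='seuclidean')
    upper_triangle = D_demo[np.triu_indices_from(D_demo, k=1)]
    gamma_demo = np.median(upper_triangle)
    lbda_demo = 10 ** -(np.ceil(np.log(gamma_demo) / np.log(10)) - 2)
    print("gamma={:.3f}, lambda={:.3g}".format(gamma_demo, lbda_demo))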
| 804 | 25.833333 | 83 |
py
|
Robust-Training-for-Time-Series
|
Robust-Training-for-Time-Series-main/preprocess_dataset.py
|
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 8 12:54:10 2019
@author: BkTaha
"""
import os
import json
import numpy as np
import pickle as pkl
import pandas as pd
from scipy.io import arff
from absl import app, flags
FLAGS = flags.FLAGS
def load_ucr_data(dataset_name, parent_file):
#Extract Data Dimensions
dim_df = pd.read_csv("DatasetUVDimensions.csv")
ds_idx = dim_df[dim_df["problem"]==dataset_name].index[0]
ds_trn_size = int(dim_df.at[ds_idx, "numTrainCases"])
ds_tst_size = int(dim_df.at[ds_idx, "numTestCases"])
ds_seg_size = int(dim_df.at[ds_idx, "seriesLength"])
#Extract TrainData
X_train = np.zeros((ds_trn_size, 1, ds_seg_size, 1))
data_file = parent_file+"/"+dataset_name+"_TRAIN.arff"
data, meta = arff.loadarff(data_file)
train_data = data[meta.names()[:-1]] #everything but the last column
train_data = np.array(train_data.tolist())
X_train[:,:,:,0] = train_data.reshape((ds_trn_size, 1,ds_seg_size))
#Extract TrainLabels
data, meta = arff.loadarff(open(data_file, "r"))
train_lbl = data[meta.names()[-1]] #LastColumn
train_lbl = np.array([ss.decode('ascii') for ss in train_lbl])
labels = {}
for i, y in enumerate(np.sort(np.unique(train_lbl))):
labels[y]=i
y_train = np.array([labels[y] for y in train_lbl])
#Extract TestData
X_test = np.zeros((ds_tst_size, 1, ds_seg_size, 1))
data_file = parent_file+"/"+dataset_name+"_TEST.arff"
data, meta = arff.loadarff(data_file)
test_data = data[meta.names()[:-1]] #everything but the last column
test_data = np.array(test_data.tolist())
X_test[:,:,:,0] = test_data.reshape((ds_tst_size, 1,ds_seg_size))
#Extract TestLabels
data, meta = arff.loadarff(open(data_file, "r"))
test_lbl = data[meta.names()[-1]] #LastColumn
test_lbl = np.array([ss.decode('ascii') for ss in test_lbl])
labels = {}
for i, y in enumerate(np.sort(np.unique(test_lbl))):
labels[y]=i
y_test = np.array([labels[y] for y in test_lbl])
rand_indices = np.arange(X_train.shape[0])
np.random.shuffle(rand_indices)
X_train = X_train[rand_indices]
y_train = y_train[rand_indices]
rand_indices = np.arange(X_test.shape[0])
np.random.shuffle(rand_indices)
X_test = X_test[rand_indices]
y_test = y_test[rand_indices]
return X_train, y_train, X_test, y_test
def load_mv_ucr_data(dataset_name, parent_file):
#Extract Data Dimensions
dim_df = pd.read_csv("DatasetMVDimensions.csv")
ds_idx = dim_df[dim_df["Problem"]==dataset_name].index[0]
ds_trn_size = int(dim_df.at[ds_idx, "TrainSize"])
ds_tst_size = int(dim_df.at[ds_idx, "TestSize"])
ds_channel_nb = int(dim_df.at[ds_idx, "NumDimensions"])
ds_seg_size = int(dim_df.at[ds_idx, "SeriesLength"])
#Extract TrainData
X_train = np.zeros((ds_trn_size, 1, ds_seg_size, ds_channel_nb))
for ch in range(ds_channel_nb):
data_file = parent_file+"/"+dataset_name+"Dimension"+str(ch+1)+"_TRAIN.arff"
data, meta = arff.loadarff(data_file)
train_data = data[meta.names()[:-1]] #everything but the last column
train_data = np.array(train_data.tolist())
X_train[:,:,:,ch] = train_data.reshape((ds_trn_size, 1,ds_seg_size))
#Extract TrainLabels
data, meta = arff.loadarff(open(data_file, "r"))
train_lbl = data[meta.names()[-1]] #LastColumn
train_lbl = np.array([ss.decode('ascii') for ss in train_lbl])
labels = {}
for i, y in enumerate(np.sort(np.unique(train_lbl))):
labels[y]=i
y_train = np.array([labels[y] for y in train_lbl])
#Extract TestData
X_test = np.zeros((ds_tst_size, 1, ds_seg_size, ds_channel_nb))
for ch in range(ds_channel_nb):
data_file = parent_file+"/"+dataset_name+"Dimension"+str(ch+1)+"_TEST.arff"
data, meta = arff.loadarff(data_file)
test_data = data[meta.names()[:-1]] #everything but the last column
test_data = np.array(test_data.tolist())
X_test[:,:,:,ch] = test_data.reshape((ds_tst_size, 1,ds_seg_size))
#Extract TestLabels
data, meta = arff.loadarff(open(data_file, "r"))
test_lbl = data[meta.names()[-1]] #LastColumn
test_lbl = np.array([ss.decode('ascii') for ss in test_lbl])
labels = {}
for i, y in enumerate(np.sort(np.unique(test_lbl))):
labels[y]=i
y_test = np.array([labels[y] for y in test_lbl])
rand_indices = np.arange(X_train.shape[0])
np.random.shuffle(rand_indices)
X_train = X_train[rand_indices]
y_train = y_train[rand_indices]
rand_indices = np.arange(X_test.shape[0])
np.random.shuffle(rand_indices)
X_test = X_test[rand_indices]
y_test = y_test[rand_indices]
return X_train, y_train, X_test, y_test
def main(argv):
dataset_zip_directory = "UCRDatasets/{}".format(FLAGS.dataset_name)
try:
os.makedirs("Dataset")
except FileExistsError:
pass
if FLAGS.multivariate:
X_train, y_train, X_test, y_test = load_mv_ucr_data(FLAGS.dataset_name, dataset_zip_directory)
else:
X_train, y_train, X_test, y_test = load_ucr_data(FLAGS.dataset_name, dataset_zip_directory)
pkl.dump([X_train, y_train, X_test, y_test], open("Dataset/"+FLAGS.dataset_name+".pkl", "wb"))
with open('datasets_parameters.json') as jf:
info = json.load(jf)
info[FLAGS.dataset_name]={
"path": "Dataset/"+FLAGS.dataset_name+".pkl",
"SEG_SIZE": X_train.shape[2],
"CHANNEL_NB": X_train.shape[3],
"CLASS_NB": len(np.unique(y_train))
}
with open('datasets_parameters.json', 'w') as jf:
json.dump(info, jf, indent=2)
if __name__=="__main__":
    flags.DEFINE_string('dataset_name', 'SyntheticControl', 'Dataset name')
flags.DEFINE_boolean('multivariate', False, 'Dataset Is multivariate')
app.run(main)
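# Example invocations (illustrative; assumes the UCR/UEA archive is already unpacked
# under UCRDatasets/<dataset_name>, as main() expects):
#   python preprocess_dataset.py --dataset_name=SyntheticControl
#   python preprocess_dataset.py --dataset_name=<SomeMultivariateDataset> --multivariate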
| 5,982 | 35.481707 | 102 |
py
|
Robust-Training-for-Time-Series
|
Robust-Training-for-Time-Series-main/test_RO_TS_model.py
|
import sys
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = "3"
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"]="0,1"
import json
import pickle as pkl
from absl import app, flags
from CNNmodel import cnn_class
FLAGS = flags.FLAGS
def main(argv):
json_param = "datasets_parameters.json"
with open(json_param) as jf:
info = json.load(jf)
d = info[FLAGS.dataset_name]
path = d['path']
SEG_SIZE = d['SEG_SIZE']
CHANNEL_NB = d['CHANNEL_NB']
CLASS_NB = d['CLASS_NB']
#Data Reading
_, _, X_test, y_test = pkl.load(open(path, 'rb'))
#Model Training
experim_path = "Experiments/Experiment_"+FLAGS.dataset_name
rots_model = cnn_class("ROTS_"+FLAGS.dataset_name, SEG_SIZE, CHANNEL_NB, CLASS_NB, arch='2')
rots_train_path = "{}/TrainingRes/ROTS_lambda_{}_beta_{}".format(experim_path, FLAGS.rots_lambda, FLAGS.rots_beta)
rots_model.rots_train([],[],10, checkpoint_path=rots_train_path, new_train=False)
score = rots_model.score(X_test, y_test)
sys.stdout.write("\nPerformance of {} ROTS training: {:.2f} on test data\n".format(FLAGS.dataset_name, score))
if __name__=="__main__":
flags.DEFINE_string('dataset_name', 'SyntheticControl', 'Dataset name')
flags.DEFINE_float('rots_lambda', -1, 'ROTS lambda value')
flags.DEFINE_float('rots_beta', 5e-2, 'ROTS beta value')
app.run(main)
| 1,456 | 36.358974 | 118 |
py
|
Robust-Training-for-Time-Series
|
Robust-Training-for-Time-Series-main/GAK.py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 16 20:31:54 2020
@author: BkTaha
"""
import numpy as np
import tensorflow as tf
def tf_cdist(s1, s2):
M1 = s1.shape[0]
M2 = s2.shape[0]
p1 = tf.matmul(tf.expand_dims(tf.reduce_sum(tf.square(s1), 1), 1),
tf.ones(shape=(1, M2), dtype=tf.float64))
p2 = tf.transpose(tf.matmul(
tf.reshape(tf.reduce_sum(tf.square(s2), 1), shape=[-1, 1]),
tf.ones(shape=(M1, 1), dtype=tf.float64),
transpose_b=True
))
res = tf.add(p1, p2) - 2 * tf.matmul(s1, s2, transpose_b=True)
return res
def tf_gak(S1, S2, gamma, path_limit=np.inf, random_kill=5):
assert S1.shape==S2.shape, "GAK input shapes mismatch"
assert len(S1.shape) > 2, "GAK input's shape error"
kill = lambda: np.random.choice([True, False], 1, p=[1/random_kill, 1-(1/random_kill)])[0]
gak_dist_list = []
for s_ind in range(S1.shape[0]):
s1 = S1[s_ind:s_ind+1]
s2 = S2[s_ind:s_ind+1]
if len(s1.shape)>2:
s1 = tf.reshape(s1, s1.shape[-2:])
s2 = tf.reshape(s2, s2.shape[-2:])
M1 = s1.shape[0]
kga_gram = tf.exp(- tf.divide(tf_cdist(s1, s2),gamma))
gak_dist = {}
for i in range(M1):
for j in range(M1):
gak_dist[(i,j)] = 0
gak_dist[(0, 0)] = kga_gram[0, 0]
for i in range(1, M1):
gak_dist[(0, i)] = tf.multiply(kga_gram[0, i], gak_dist[(0, i-1)])
gak_dist[(i, 0)] = tf.multiply(kga_gram[i, 0], gak_dist[(i-1, 0)])
for i in range(1, M1):
for j in range(1, M1):
if np.abs(i-j) > path_limit:
gak_dist[(i, j)] = 0
elif kill():
gak_dist[(i, j)] = 0
else:
gak_dist[(i, j)] = tf.multiply(kga_gram[i, j],
tf.reduce_sum(tf.convert_to_tensor([gak_dist[(i, j-1)],
gak_dist[(i-1, j)],
gak_dist[(i-1, j-1)]], dtype=tf.float64)))
gak_dist_list.append(tf.math.log(tf.convert_to_tensor(gak_dist[M1-1, M1-1], dtype=tf.float64)))
return gak_dist_list
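# A minimal, self-contained sketch of calling tf_gak (hypothetical shapes and gamma;
# not part of the original project). Inputs are batches shaped
# (batch, 1, length, channels); the function returns one log-GAK value per pair of
# series. Results are stochastic because of the random path elimination (random_kill).
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    S1_demo = tf.constant(rng.normal(size=(2, 1, 20, 1)), dtype=tf.float64)
    S2_demo = tf.constant(rng.normal(size=(2, 1, 20, 1)), dtype=tf.float64)
    log_gak_values = tf_gak(S1_demo, S2_demo, gamma=1.0, path_limit=10, random_kill=5)
    print([float(v) for v in log_gak_values])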
| 2,366 | 34.328358 | 108 |
py
|
Robust-Training-for-Time-Series
|
Robust-Training-for-Time-Series-main/RO_TS.py
|
import sys
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = "3"
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]="0"
import json
import tensorflow as tf
import pickle as pkl
from absl import app, flags
from CNNmodel import cnn_class
from hyparamtuning import get_hyparams
FLAGS = flags.FLAGS
def main(argv):
json_param = "datasets_parameters.json"
with open(json_param) as jf:
info = json.load(jf)
d = info[FLAGS.dataset_name]
path = d['path']
SEG_SIZE = d['SEG_SIZE']
CHANNEL_NB = d['CHANNEL_NB']
CLASS_NB = d['CLASS_NB']
#Data Reading
X_train, y_train, X_test, y_test = pkl.load(open(path, 'rb'))
sys.stdout.write("{} - Shape:{}".format(FLAGS.dataset_name, X_train.shape))
#Model Training
experim_path = "Experiments/Experiment_"+FLAGS.dataset_name
try:
os.makedirs(experim_path)
except FileExistsError:
pass
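    # rots_lambda == -1 means "use the tuned value from hyparamtuning"; gamma (the GAK bandwidth)
    # always comes from the tuned hyper-parameters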
if FLAGS.rots_lambda==-1:
gamma, FLAGS.rots_lambda = get_hyparams(FLAGS.dataset_name)
else:
gamma, _ = get_hyparams(FLAGS.dataset_name)
rots_train_path = "{}/TrainingRes/ROTS_lambda_{}_beta_{}".format(experim_path, FLAGS.rots_lambda, FLAGS.rots_beta)
rots_model = cnn_class("ROTS_"+FLAGS.dataset_name, SEG_SIZE, CHANNEL_NB, CLASS_NB, arch='2')
if FLAGS.save_with_valid:
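        # Hold out the last `vs` fraction of the training set for validation-based checkpointing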
        validation_size = int(FLAGS.vs*X_train.shape[0])
X_valid = X_train[-validation_size:]
y_valid = y_train[-validation_size:]
total_iter = (X_train[:-validation_size].shape[0] * FLAGS.K)//FLAGS.batch + 1
train_ds = tf.data.Dataset.from_tensor_slices((X_train[:-validation_size], y_train[:-validation_size]))\
.shuffle(X_train[:-validation_size].shape[0]).repeat(FLAGS.K).batch(FLAGS.batch, drop_remainder=True)
rots_model.rots_train(train_ds, (SEG_SIZE, CHANNEL_NB), total_iter, gamma_gak=gamma, path_limit=FLAGS.rots_gak_sample, gak_random_kill=FLAGS.gak_random_kill,
new_train=True, checkpoint_path=rots_train_path,
X_valid=X_valid, y_valid=y_valid,
gamma_k=5e-2, lbda=FLAGS.rots_lambda, beta=FLAGS.rots_beta,
verbose=False)
else:
total_iter = (X_train.shape[0] * FLAGS.K)//FLAGS.batch + 1
train_ds = tf.data.Dataset.from_tensor_slices((X_train, y_train))\
.shuffle(X_train.shape[0]).repeat(FLAGS.K).batch(FLAGS.batch, drop_remainder=True)
rots_model.rots_train(train_ds, (SEG_SIZE, CHANNEL_NB), total_iter, gamma_gak=gamma, path_limit=FLAGS.rots_gak_sample, gak_random_kill=FLAGS.gak_random_kill,
new_train=True, checkpoint_path=rots_train_path,
gamma_k=5e-2, lbda=FLAGS.rots_lambda, beta=FLAGS.rots_beta,
verbose=False)
if __name__=="__main__":
flags.DEFINE_string('dataset_name', 'SyntheticControl', 'Dataset name')
flags.DEFINE_integer('batch', 11, 'Batch Size')
flags.DEFINE_integer('K', 10, 'RO-TS Iterations')
flags.DEFINE_integer('rots_gak_sample', 20, 'RO-TS GAK path sampling')
flags.DEFINE_integer('gak_random_kill', 5, 'RO-TS GAK path sampling random elimination')
flags.DEFINE_float('vs', 0.1, 'Validation ratio from training')
flags.DEFINE_float('rots_lambda', -1, 'RO-TS lambda value')
flags.DEFINE_float('rots_beta', 5e-2, 'RO-TS beta value')
flags.DEFINE_boolean('save_with_valid', False, 'Save best weight using validation set')
app.run(main)
| 3,526 | 45.407895 | 165 |
py
|
LightDepth
|
LightDepth-main/torch_implementation/config.py
|
import datetime
import os
import json
class Config():
def __init__(self,base_kw_args):
self.update(base_kw_args)
def update(self,overload_config):
for k, v in overload_config.items():
try:
                # setattr handles both new and existing attributes; `self.k = v` would only ever
                # set a literal attribute named "k"
                setattr(self, k, v)
except AttributeError:
print(k,v)
def load_version(self,name):
print(f'version set to {name}')
log_dir = name.replace('ckpts','logs')
setting_dir = name.replace('ckpts','settings')
self.train_log_dir = os.path.join(log_dir,
'train')
self.test_log_dir = os.path.join(log_dir,
'test')
self.best_ckpt_path = os.path.join(name,
'best_models')
self.last_ckpt_path = os.path.join(name,
'last_model')
self.update(json.load(open(setting_dir+'.json','r')))
assert len(self._early_stopping_patience) == len(self.strategies),\
f'number of strategies except ground truth {len(self.strategies)}\
and early stopping patience {len(self._early_stopping_patience)} should be equal'
def create_version(self,version_name):
version_name += '_'+datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
log_dir = self._log_dir+version_name
ckpt_dir = self._ckpt_path+version_name
self.train_log_dir = os.path.join(log_dir,
'train')
self.test_log_dir = os.path.join(log_dir,
'test')
self.best_ckpt_path = os.path.join(ckpt_dir,
'best_models')
self.last_ckpt_path = os.path.join(ckpt_dir,
'last_model')
os.mkdir(log_dir)
os.mkdir(ckpt_dir)
os.mkdir(self.train_log_dir)
os.mkdir(self.test_log_dir)
os.mkdir(self.best_ckpt_path)
os.mkdir(self.last_ckpt_path)
json.dump({},open(self._setting_path+version_name+'.json','w'))
        print('version created successfully')
| 2,285 | 35.870968 | 93 |
py
|
LightDepth
|
LightDepth-main/torch_implementation/scripts/dataloaders.py
|
# This file is mostly taken from BTS; author: Jin Han Lee, with only slight modifications
import os
import random
import numpy as np
import torch
import torch.utils.data.distributed
from torch.nn import MaxPool2d
from PIL import Image
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms
def _is_pil_image(img):
return isinstance(img, Image.Image)
def _is_numpy_image(img):
return isinstance(img, np.ndarray) and (img.ndim in {2, 3})
def preprocessing_transforms(mode):
return transforms.Compose([
ToTensor(mode=mode)
])
class DepthDataLoader(object):
def __init__(self, config, mode):
self._early_stopping_patience = config._early_stopping_patience
if mode == 'train':
self.data_samples = DataLoadPreprocess(config, mode, transform=preprocessing_transforms(mode))
if config.distributed:
self.train_sampler = torch.utils.data.distributed.DistributedSampler(self.data_samples)
else:
self.train_sampler = None
self.data = DataLoader(self.data_samples, config.train_batch_size,
shuffle=(self.train_sampler is None),
num_workers=config.num_threads,
pin_memory=True,
sampler=self.train_sampler)
elif mode == 'online_eval':
self.data_samples = DataLoadPreprocess(config, mode, transform=preprocessing_transforms(mode))
if config.distributed: # redundant. here only for readability and to be more explicit
# Give whole test set to all processes (and perform/report evaluation only on one) regardless
self.eval_sampler = None
else:
self.eval_sampler = None
self.data = DataLoader(self.data_samples, 1,
shuffle=False,
num_workers=1,
pin_memory=False,
sampler=self.eval_sampler)
elif mode == 'test':
self.data_samples = DataLoadPreprocess(config, mode, transform=preprocessing_transforms(mode))
self.data = DataLoader(self.data_samples, 1, shuffle=False, num_workers=1)
else:
print('mode should be one of \'train, test, online_eval\'. Got {}'.format(mode))
@property
def early_stopping_patience(self):
return self._early_stopping_patience[self.data_samples.current_strategy]
def remove_leading_slash(s):
if s[0] == '/' or s[0] == '\\':
return s[1:]
return s
class DataLoadPreprocess(Dataset):
def __init__(self, config, mode, transform=None):
self.config = config
if mode == 'online_eval':
with open(config.test_filenames_file, 'r') as f:
self.filenames = f.readlines()
else:
with open(config.train_filenames_file, 'r') as f:
self.filenames = f.readlines()
self.mode = mode
self.transform = transform
self.strategies = config.strategies
self.current_strategy = 0
self.multiple_strategy = config.multiple_strategy
self.data_path = config.input_data_path
self.gt_path = config.groundtruth_data_path
self.do_kb_crop = config.do_kb_crop
self.dataset = config.dataset
self.do_random_rotate = config.do_random_rotate
self.rotation_degree = config.rotation_degree
self.input_height = config.input_height
self.input_width = config.input_width
def __getitem__(self, idx):
sample_path = self.filenames[idx]
image_path = os.path.join(self.data_path, remove_leading_slash(sample_path.split()[0]))
depth_path = os.path.join(self.gt_path, remove_leading_slash(sample_path.split()[1]))
image = Image.open(image_path)
depth_gt = Image.open(depth_path)
height = image.height
width = image.width
        if self.mode == 'train':
            # PIL-based operations (crop/rotate) must run before the numpy conversion below
            if self.dataset == 'nyu':
                # To avoid blank boundaries due to pixel registration
                depth_gt = depth_gt.crop((43, 45, 608, 472))
                image = image.crop((43, 45, 608, 472))
            if self.do_random_rotate:
                random_angle = (random.random() - 0.5) * 2 * self.rotation_degree
                image = self.rotate_image(image, random_angle)
                depth_gt = self.rotate_image(depth_gt, random_angle, flag=Image.NEAREST)
        image = np.asarray(image, dtype=np.float32) / 255.0
        depth_gt = np.asarray(depth_gt, dtype=np.float32)
        depth_gt = np.expand_dims(depth_gt, axis=2)
        if self.do_kb_crop:
            top_margin = int(height - 352)
            left_margin = int((width - 1216) / 2)
            depth_gt = depth_gt[top_margin:top_margin + 352, left_margin:left_margin + 1216]
            image = image[top_margin:top_margin + 352, left_margin:left_margin + 1216, :]
        if self.mode == 'train':
            image, depth_gt = self.random_crop(image, depth_gt, self.input_height, self.input_width)
            image, depth_gt = self.train_augment(image, depth_gt)
if self.multiple_strategy:
depth_gt = self.dilation(depth_gt,**self.strategies[self.current_strategy])
if self.dataset == 'nyu':
depth_gt = depth_gt / 1000.0
else:
depth_gt = depth_gt / 256.0
sample = {'image': image, 'depth': depth_gt}
if self.transform:
sample = self.transform(sample)
return sample
    def dilation(self, depth_gt, pool_size=(2, 2), iterations=1):  # TODO: resize back to the input resolution
        if iterations > 0:
            # MaxPool2d expects a (C, H, W) tensor, so convert the (H, W, 1) numpy array first
            depth_gt = torch.from_numpy(depth_gt).permute(2, 0, 1)
            for _ in range(iterations):
                depth_gt = MaxPool2d(kernel_size=pool_size)(depth_gt)
            depth_gt = depth_gt.permute(1, 2, 0).numpy()
        return depth_gt
def rotate_image(self, image, angle, flag=Image.BILINEAR):
result = image.rotate(angle, resample=flag)
return result
def random_crop(self, img, depth, height, width):
assert img.shape[0] >= height
assert img.shape[1] >= width
assert img.shape[0] == depth.shape[0]
assert img.shape[1] == depth.shape[1]
x = random.randint(0, img.shape[1] - width)
y = random.randint(0, img.shape[0] - height)
img = img[y:y + height, x:x + width, :]
depth = depth[y:y + height, x:x + width, :]
return img, depth
def train_augment(self, image, depth_gt):
# Random flipping
do_flip = random.random()
if do_flip > 0.5:
image = (image[:, ::-1, :]).copy()
depth_gt = (depth_gt[:, ::-1, :]).copy()
# Random gamma, brightness, color augmentation
do_augment = random.random()
if do_augment > 0.5:
image = self.augment_image(image)
return image, depth_gt
def augment_image(self, image):
# gamma augmentation
gamma = random.uniform(0.9, 1.1)
image_aug = image ** gamma
# brightness augmentation
if self.dataset == 'nyu':
brightness = random.uniform(0.75, 1.25)
else:
brightness = random.uniform(0.9, 1.1)
image_aug = image_aug * brightness
# color augmentation
colors = np.random.uniform(0.9, 1.1, size=3)
white = np.ones((image.shape[0], image.shape[1]))
color_image = np.stack([white * colors[i] for i in range(3)], axis=2)
image_aug *= color_image
image_aug = np.clip(image_aug, 0, 1)
return image_aug
def __len__(self):
return len(self.filenames)
class ToTensor(object):
def __init__(self, mode):
self.mode = mode
self.normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
def __call__(self, sample):
image = sample['image']
image = self.to_tensor(image)
image = self.normalize(image)
depth = sample['depth']
if self.mode == 'train':
depth = self.to_tensor(depth)
return {'image': image, 'depth': depth}
def to_tensor(self, pic):
if not (_is_pil_image(pic) or _is_numpy_image(pic)):
raise TypeError(
'pic should be PIL Image or ndarray. Got {}'.format(type(pic)))
if isinstance(pic, np.ndarray):
img = torch.from_numpy(pic.transpose((2, 0, 1)))
return img
# handle PIL Image
if pic.mode == 'I':
img = torch.from_numpy(np.array(pic, np.int32, copy=False))
elif pic.mode == 'I;16':
img = torch.from_numpy(np.array(pic, np.int16, copy=False))
else:
img = torch.ByteTensor(torch.ByteStorage.from_buffer(pic.tobytes()))
# PIL image mode: 1, L, P, I, F, RGB, YCbCr, RGBA, CMYK
if pic.mode == 'YCbCr':
nchannel = 3
elif pic.mode == 'I;16':
nchannel = 1
else:
nchannel = len(pic.mode)
img = img.view(pic.size[1], pic.size[0], nchannel)
img = img.transpose(0, 1).transpose(0, 2).contiguous()
if isinstance(img, torch.ByteTensor):
return img.float()
else:
return img
| 9,302 | 36.063745 | 109 |
py
|
LightDepth
|
LightDepth-main/torch_implementation/scripts/models/ordinary_unet.py
|
import torch
class OrdinaryUNet():
    # NOTE: partial PyTorch port of the Keras decoder block; the encoder wiring is still missing,
    # so `concat_with` is taken to be the skip-connection tensor itself rather than a layer name.
    def __init__(self, config):
        def UpConv2D(tensor, filters, name, concat_with):
            # Upsample by a factor of 2 (passing (2, 2) as `size` would resize to a fixed 2x2 map)
            up_i = torch.nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)(tensor)
            up_i = torch.cat([up_i, concat_with], dim=1)  # Skip connection along the channel axis
            up_i = torch.nn.Conv2d(up_i.shape[1], filters, kernel_size=3, stride=1, padding=1)(up_i)
            up_i = torch.nn.LeakyReLU(0.2)(up_i)
            up_i = torch.nn.Conv2d(filters, filters, kernel_size=3, stride=1, padding=1)(up_i)
            up_i = torch.nn.LeakyReLU(0.2)(up_i)
            return up_i
py
|
LightDepth
|
LightDepth-main/tf_implementation/config.py
|
import datetime
import os
import json
class Config:
def __init__(self,base_kw_args):
self.update(base_kw_args)
def update(self,overload_config):
if overload_config == {}:
print('updated with empty config!')
for k, v in overload_config.items():
try:
setattr(self, k, v)
except AttributeError:
print(k,v)
def load_version(self,name):
print(f'version set to {name}')
log_dir = name.replace('ckpts','logs')
setting_dir = name.replace('ckpts','settings')
self.train_log_dir = os.path.join(log_dir,
'train')
self.test_log_dir = os.path.join(log_dir,
'test')
self.best_ckpt_path = os.path.join(name,
'best_models')
self.last_ckpt_path = os.path.join(name,
'last_model')
self.update(json.load(open(setting_dir+'.json','r')))
assert len(self._early_stopping_patience) == len(self.strategies),\
f'number of strategies except ground truth {len(self.strategies)}\
and early stopping patience {len(self._early_stopping_patience)} should be equal'
def create_version(self,version_name):
version_name += '_'+datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
log_dir = self._log_dir+version_name
ckpt_dir = self._ckpt_path+version_name
self.train_log_dir = os.path.join(log_dir,
'train')
self.test_log_dir = os.path.join(log_dir,
'test')
self.best_ckpt_path = os.path.join(ckpt_dir,
'best_models')
self.last_ckpt_path = os.path.join(ckpt_dir,
'last_model')
os.mkdir(log_dir)
os.mkdir(ckpt_dir)
os.mkdir(self.train_log_dir)
os.mkdir(self.test_log_dir)
os.mkdir(self.best_ckpt_path)
os.mkdir(self.last_ckpt_path)
json.dump({},open(self._setting_path+version_name+'.json','w'))
        print('version created successfully')
| 2,272 | 36.262295 | 93 |
py
|
LightDepth
|
LightDepth-main/tf_implementation/scripts/utils.py
|
from glob import glob
import os
import tensorflow as tf
import numpy as np
def good(image):
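    # Undo the per-channel mean subtraction and rescale to roughly [0, 1] for visualisation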
num_channels = image.shape[-1]
means = [123.68, 116.779, 103.939]
channels = tf.split(axis=2, num_or_size_splits=num_channels, value=image)
for i in range(num_channels):
channels[i] += means[i]
temp = tf.concat(axis=2, values=channels)
return (temp - np.amin(temp))/np.amax(temp)
def get_ground_truth_path(t):
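    # Rewrite a raw KITTI image path into the matching depth annotation path under
    # data_depth_annotated/.../proj_depth/groundtruth/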
u = t.split('/')
u[1] = 'data_depth_annotated'
u.pop(2)
u.insert(3,'proj_depth/groundtruth')
u.pop(-2)
return '/'.join(u)
def update_train_filenames_file(config):
if os.path.exists(config.train_filenames_file):
basename = os.path.basename(config.train_filenames_file)
dirname = os.path.dirname(config.train_filenames_file)
oldfile_name = os.path.join(dirname,'deprecated_'+basename)
os.rename(config.train_filenames_file,oldfile_name)
print('reading the test filenames file')
test_data = list(open(config.test_filenames_file,'r'))
print('splitting the test filenames file')
test_data = set([t.split(' ')[0] for t in test_data])
print('reading training data recursively')
train_data = glob(os.path.join(config.input_data_path,
'*/*[!()]/image_02/data/*.png'),
recursive=True)
intersection_with_test_data = test_data.intersection(train_data)
print(f'len test {len(test_data)}')
print(f'len train {len(train_data)-len(intersection_with_test_data)}')
print(f'len inter {len(intersection_with_test_data)}')
for item in intersection_with_test_data:
train_data.remove(item)
gt_data = [get_ground_truth_path(p) for p in train_data]
missing_counter = 0
for v in gt_data:
if not os.path.exists(v):
missing_counter+=1
print(f'number of missing examples {missing_counter}')
print('generating paths')
train_data = [u+' '+v for u,v in zip(train_data,gt_data) if os.path.exists(v)]
print(f'writing in {config.train_filenames_file}')
with open(config.train_filenames_file,'w') as buffer:
buffer.write('\n'.join(train_data))
| 2,184 | 36.033898 | 82 |
py
|
LightDepth
|
LightDepth-main/tf_implementation/scripts/dataloaders.py
|
import numpy as np
import tensorflow as tf
import tensorflow_addons as tfa
from tensorflow.keras.layers import MaxPooling2D
import os
class OrdinaryDataloader(object):
def __init__(self, config,is_training=True,debug=False):
self.do_flip = config.do_flip
self.do_augment = config.do_augment
self.do_rotate = config.do_rotate
self.do_kb_crop = config.do_kb_crop
self.use_normalized_image = config.use_normalized_image
self.is_training = is_training
self.debug = debug
if self.is_training:
self.multiple_strategy = config.multiple_strategy
else:
self.multiple_strategy = False
self.current_strategy = 0
self._early_stopping_patience = config._early_stopping_patience
self.degree = np.deg2rad(tf.constant(config.rotation_degree,dtype=tf.float16))
self.height = config.input_height
self.width = config.input_width
self.input_data_path = config.input_data_path
self.groundtruth_data_path = config.groundtruth_data_path
self.strategies = config.strategies
if self.is_training:
filenames = list(open(config.train_filenames_file))
self.batch_size = config.train_batch_size
else:
filenames = list(open(config.test_filenames_file))
self.batch_size = config.test_batch_size
if config.train_only_on_the_first_image:
filenames = [tf.identity(filenames[0]) for i in range(8)]
self.num_elements = len(filenames)
self.loader = tf.data.Dataset.from_tensor_slices(filenames)
if self.is_training:
if not self.debug:
self.loader = self.loader.shuffle(self.num_elements,
reshuffle_each_iteration=True)
self.loader = self.loader.repeat()
self.loader = self.loader.map(self.parse_function)
self.loader = self.loader.map(self.train_preprocess)
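            # lazy_preprocess reads self.current_strategy at run time, so wrap it in
            # tf.py_function instead of tracing it into the graph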
self.loader = self.loader.map(
lambda x,y: tf.py_function(
self.lazy_preprocess,
[x,y],
[tf.float32,
tf.float32]))
else:
self.loader = self.loader.map(self.parse_function)
self.loader = self.loader.map(self.test_preprocess)
self.loader = self.loader.batch(self.batch_size).prefetch(2)
@property
def early_stopping_patience(self):
if self.multiple_strategy:
return self._early_stopping_patience[self.current_strategy]
else:
return self._early_stopping_patience[-1]
@property
def num_strategies(self):
return len(self.strategies)
def parse_function(self, line):
paths = tf.strings.split(line)
image = tf.image.decode_png(tf.io.read_file(self.input_data_path+paths[0]))
image = tf.image.convert_image_dtype(image, tf.float32)
depth_gt = tf.image.decode_png(tf.io.read_file(self.groundtruth_data_path+paths[1]),
channels=0,
dtype=tf.uint16)
depth_gt = tf.cast(depth_gt, tf.float32) / 256.0
if self.do_kb_crop:
print('Cropping training images as kitti benchmark images')
height = tf.shape(image)[0]
width = tf.shape(image)[1]
top_margin = tf.cast(height - 352,dtype=tf.int32)
left_margin = tf.cast((width - 1216) / 2,dtype=tf.int32)
depth_gt = depth_gt[top_margin:top_margin + 352, left_margin:left_margin + 1216, :]
image = image[top_margin:top_margin + 352, left_margin:left_margin + 1216, :]
return image, depth_gt
def train_preprocess(self, image,depth_gt):
if self.do_rotate:
print('Rotating training images')
random_angle = tf.random.uniform([], - self.degree, self.degree)
image = tfa.image.rotate(image, random_angle, interpolation='nearest')
depth_gt = tfa.image.rotate(depth_gt, random_angle, interpolation='nearest')
image, depth_gt = self.crop_fixed_size(image, depth_gt)
if self.do_flip:
do_flip = tf.random.uniform([], 0, 1)
image = tf.cond(do_flip > 0.5, lambda: tf.image.flip_left_right(image), lambda: image)
depth_gt = tf.cond(do_flip > 0.5, lambda: tf.image.flip_left_right(depth_gt), lambda: depth_gt)
if self.do_augment:
do_augment = tf.random.uniform([], 0, 1)
image = tf.cond(do_augment > 0.5, lambda: self.augment_image(image), lambda: image)
image.set_shape([self.height, self.width, 3])
depth_gt.set_shape([self.height, self.width, 1])
if self.use_normalized_image:
image *= 255.0
image = self.mean_image_subtraction(image,
[123.68, 116.78, 103.94])
return image, depth_gt
def test_preprocess(self,image,depth_gt):
image.set_shape([None, None, 3])
depth_gt.set_shape([None, None, 1])
if self.use_normalized_image:
image *= 255.0
image = self.mean_image_subtraction(image,
[123.68, 116.78, 103.94])
return image, depth_gt
def lazy_preprocess(self, image, depth_gt):
if self.multiple_strategy:
strategy = self.strategies[self.current_strategy]
else:
strategy = self.strategies[-1]
depth_gt = self.dilation(depth_gt,**strategy)
return image, depth_gt
def dilation(self, depth_gt, pool_size=(2,2), iterations=1):
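        # Densify the sparse depth map by max-pooling ("dilation"), then resize back to the
        # network input size with nearest-neighbour interpolation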
if iterations > 0:
depth_gt = tf.expand_dims(depth_gt, axis=0)
for _ in range(iterations):
depth_gt = MaxPooling2D(pool_size=pool_size)(depth_gt)
depth_gt = tf.squeeze(depth_gt,axis=0)
depth_gt = tf.image.resize(depth_gt,
(self.height,self.width),
method='nearest')
return depth_gt
def crop_fixed_size(self, image, depth_gt):
image_depth = tf.concat([image, depth_gt], 2)
if not self.debug:
image_depth_cropped = tf.image.random_crop(image_depth, [self.height, self.width, 4])
else:
image_depth_cropped = image_depth[100:100+self.height, 365:365+self.width, :]
image_cropped = image_depth_cropped[:, :, 0:3]
depth_gt_cropped = tf.expand_dims(image_depth_cropped[:, :, 3], 2)
return image_cropped, depth_gt_cropped
def augment_image(self, image):
# gamma augmentation
gamma = tf.random.uniform([], 0.9, 1.1)
image_aug = image ** gamma
brightness = tf.random.uniform([], 0.9, 1.1)
image_aug = image_aug * brightness
# color augmentation
colors = tf.random.uniform([3], 0.9, 1.1)
white = tf.ones([tf.shape(image)[0], tf.shape(image)[1]])
color_image = tf.stack([white * colors[i] for i in range(3)], axis=2)
image_aug *= color_image
return image_aug
@staticmethod
def mean_image_subtraction(image, means):
"""Subtracts the given means from each image channel.
For example:
means = [123.68, 116.779, 103.939]
image = mean_image_subtraction(image, means)
Note that the rank of `image` must be known.
Args:
image: a tensor of size [height, width, C].
means: a C-vector of values to subtract from each channel.
Returns:
the centered image.
Raises:
ValueError: If the rank of `image` is unknown, if `image` has a rank other
than three or if the number of channels in `image` doesn't match the
number of values in `means`.
"""
if image.get_shape().ndims != 3:
raise ValueError('Input must be of size [height, width, C>0]')
num_channels = image.get_shape().as_list()[-1]
if len(means) != num_channels:
raise ValueError(f'len(means)==3 must match the number of channels == {num_channels}')
channels = tf.split(axis=2, num_or_size_splits=num_channels, value=image)
for i in range(num_channels):
channels[i] -= means[i]
return tf.concat(axis=2, values=channels)
| 8,542 | 39.488152 | 107 |
py
|
LightDepth
|
LightDepth-main/tf_implementation/scripts/models/efficient_unet.py
|
from tensorflow.keras.layers import Layer, InputSpec
import tensorflow as tf
from tensorflow.keras import applications
from tensorflow.keras.layers import Conv2D, Concatenate, LeakyReLU, UpSampling2D
import keras.backend as K
import keras.utils.conv_utils as conv_utils
from tensorflow.keras.models import Model
import numpy as np
def normalize_data_format(value):
if value is None:
value = K.image_data_format()
data_format = value.lower()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('The `data_format` argument must be one of '
'"channels_first", "channels_last". Received: ' +
str(value))
return data_format
class BilinearUpSampling2D(Layer):
def __init__(self, size=(2, 2), data_format=None, **kwargs):
super(BilinearUpSampling2D, self).__init__(**kwargs)
self.data_format = normalize_data_format(data_format)
self.size = conv_utils.normalize_tuple(size, 2, 'size')
self.input_spec = InputSpec(ndim=4)
def compute_output_shape(self, input_shape):
if self.data_format == 'channels_first':
height = self.size[0] * input_shape[2] if input_shape[2] is not None else None
width = self.size[1] * input_shape[3] if input_shape[3] is not None else None
return (input_shape[0],
input_shape[1],
height,
width)
elif self.data_format == 'channels_last':
height = self.size[0] * input_shape[1] if input_shape[1] is not None else None
width = self.size[1] * input_shape[2] if input_shape[2] is not None else None
return (input_shape[0],
height,
width,
input_shape[3])
def call(self, inputs):
input_shape = K.shape(inputs)
if self.data_format == 'channels_first':
height = self.size[0] * input_shape[2] if input_shape[2] is not None else None
width = self.size[1] * input_shape[3] if input_shape[3] is not None else None
elif self.data_format == 'channels_last':
height = self.size[0] * input_shape[1] if input_shape[1] is not None else None
width = self.size[1] * input_shape[2] if input_shape[2] is not None else None
return tf.image.resize(inputs, [height, width], method=tf.image.ResizeMethod.BILINEAR)
def get_config(self):
config = {'size': self.size, 'data_format': self.data_format}
base_config = super(BilinearUpSampling2D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class EfficientUNet():
def __init__(self,config):
lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
config.initial_learning_rate,
decay_steps=config.decay_steps,
decay_rate=config.decay_rate)
self.optimizer = getattr(tf.optimizers,config.optimizer)(learning_rate=lr_schedule)
self.max_depth = tf.constant(config.max_depth)
self.min_depth = config.min_depth
self.model_loss = getattr(self,config.loss_fn)
self.garg_crop = config.garg_crop
self.eigen_crop = config.eigen_crop
self.do_flip_predict = config.do_flip_predict
self.eps = 1e-5
def UpConv2D(tensor, filters, name, concat_with):
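            # Decoder block: 2x bilinear upsampling, concatenation with an encoder feature map
            # (skip connection), followed by two 3x3 conv + LeakyReLU layers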
up_i = BilinearUpSampling2D((2, 2), name=name+'_upsampling2d')(tensor)
up_i = Concatenate(name=name+'_concat')([up_i, encoder.get_layer(concat_with).output]) # Skip connection
up_i = Conv2D(filters=filters, kernel_size=3, strides=1, padding='same', name=name+'_convA')(up_i)
up_i = LeakyReLU(alpha=0.2)(up_i)
up_i = Conv2D(filters=filters, kernel_size=3, strides=1, padding='same', name=name+'_convB')(up_i)
up_i = LeakyReLU(alpha=0.2)(up_i)
return up_i
encoder = getattr(applications.efficientnet,config.encoder)(input_shape=(None, None, 3), include_top=False)
encoder_output_shape = encoder.output.shape
decode_filters = int(encoder_output_shape[-1])
decoder = Conv2D(filters=decode_filters,
kernel_size=1, padding='same',
input_shape=encoder_output_shape,
name='conv2')(encoder.output)
decoder = UpConv2D(decoder, int(decode_filters/2),
'up1', concat_with='block4a_dwconv')
decoder = UpConv2D(decoder, int(decode_filters/4),
'up2', concat_with='block3a_dwconv')
decoder = UpConv2D(decoder, int(decode_filters/8),
'up3', concat_with='block2a_dwconv')
decoder = UpConv2D(decoder, int(decode_filters/16),
'up4', concat_with='block1c_activation')
# decoder = UpConv2D(decoder, int(decode_filters/32),
# 'up5', concat_with=encoder.input.name)
outputs = Conv2D(filters=1,
kernel_size=3,
strides=1,
padding='same',
name='conv3',
activation=config.decoder_last_layer_activation_fn)(decoder)
outputs = UpSampling2D()(outputs)
if config.decoder_last_layer_activation_fn == 'sigmoid':
outputs=outputs*self.max_depth + self.eps
else:
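            # No sigmoid head: min-max normalise the raw outputs, then rescale into [min_depth, max_depth]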
outputs = outputs - tf.reduce_min(outputs)
outputs = outputs / tf.reduce_max(outputs)
outputs = (outputs*(self.max_depth-self.min_depth))+self.min_depth
self.model = Model(inputs=encoder.input, outputs=outputs)
@tf.function
def test_step(self,image,depth_gt):
depth_est = self.model(image, training=False)
loss_value = self.model_loss(depth_est, depth_gt)
return loss_value,depth_est
@tf.function
def train_step(self, image, depth_gt):
with tf.GradientTape() as tape:
depth_est = self.model(image, training=True)
loss_value = self.model_loss(depth_est, depth_gt)
grads = tape.gradient(loss_value, self.model.trainable_variables)
self.optimizer.apply_gradients(zip(grads, self.model.trainable_variables))
return loss_value,tf.reduce_max(depth_est),tf.reduce_min(depth_est)
def compute_metrics(self,image,depth_gt):
valid_mask = np.logical_and(depth_gt > self.min_depth,
depth_gt < self.max_depth)
if self.garg_crop or self.eigen_crop:
batches, gt_height, gt_width, channels = depth_gt.shape
eval_mask = np.zeros(valid_mask.shape)
if self.garg_crop:
eval_mask[:,int(0.40810811 * gt_height):int(0.99189189 * gt_height),
int(0.03594771 * gt_width):int(0.96405229 * gt_width),:] = 1
elif self.eigen_crop:
# if self.dataset == 'kitti':
eval_mask[:,int(0.3324324 * gt_height):int(0.91351351 * gt_height),
int(0.0359477 * gt_width):int(0.96405229 * gt_width),:] = 1
# else:
# eval_mask[:,45:471, 41:601,:] = 1
depth_est = self.model(image, training=False)
        if self.do_flip_predict:
            # Predict on the horizontally flipped image, flip the prediction back, then average
            depth_est_lr = self.model(image[..., ::-1, :], training=False)
            depth_est_final = (0.5 * (depth_est + depth_est_lr[..., ::-1, :]))[valid_mask]
else:
depth_est_final = depth_est[valid_mask]
depth_gt = depth_gt[valid_mask]
thresh = np.maximum((depth_gt / depth_est_final), (depth_est_final / depth_gt))
a1 = (thresh < 1.25).mean()
a2 = (thresh < 1.25 ** 2).mean()
a3 = (thresh < 1.25 ** 3).mean()
abs_rel = np.mean(np.abs(depth_gt - depth_est_final) / depth_gt)
sq_rel = np.mean(((depth_gt - depth_est_final) ** 2) / depth_gt)
rmse = (depth_gt - depth_est_final) ** 2
rmse = np.sqrt(np.mean(rmse))
rmse_log = (np.log(depth_gt) - np.log(depth_est_final)) ** 2
rmse_log = np.sqrt(rmse_log.mean())
err = np.log(depth_est_final) - np.log(depth_gt)
silog = np.sqrt(np.mean(err ** 2) - np.mean(err) ** 2) * 100
log_10 = (np.abs(np.log10(depth_gt) - np.log10(depth_est_final))).mean()
return dict(a1=a1, a2=a2, a3=a3,
abs_rel=abs_rel, rmse=rmse, log_10=log_10,
rmse_log=rmse_log, silog=silog, sq_rel=sq_rel)
@tf.function
def bts_loss(self,depth_est,depth_gt):
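        # Scale-invariant log (SILog) loss as used in BTS: variance-focus weight 0.85, scaled by 10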
mask = depth_gt > self.min_depth
depth_gt_masked = tf.boolean_mask(depth_gt, mask)
depth_est_masked = tf.boolean_mask(depth_est, mask)
d = tf.math.log(depth_est_masked) - tf.math.log(depth_gt_masked)
return tf.sqrt(tf.reduce_mean(d ** 2) - 0.85 * (tf.reduce_mean(d) ** 2)) * 10.0
@tf.function
def kitti_loss(self,depth_est,depth_gt):
mask = depth_gt > self.min_depth
depth_gt_masked = tf.boolean_mask(depth_gt, mask)
depth_est_masked = tf.boolean_mask(depth_est, mask)
d = tf.math.log(depth_est_masked) - tf.math.log(depth_gt_masked)
return tf.reduce_mean(d ** 2) - (tf.reduce_mean(d) ** 2)
@tf.function
def densedepth_loss(self,depth_est, depth_gt, theta=0.1, maxDepthVal=1000.0/10.0):
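        # DenseDepth loss: weighted sum of SSIM, image-gradient (edge) and L1 depth terms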
l_depth = K.mean(K.abs(depth_est - depth_gt), axis=-1)
dy_true, dx_true = tf.image.image_gradients(depth_gt)
dy_pred, dx_pred = tf.image.image_gradients(depth_est)
l_edges = K.mean(K.abs(dy_pred - dy_true) + K.abs(dx_pred - dx_true), axis=-1)
l_ssim = K.clip((1 - tf.image.ssim(depth_gt, depth_est, maxDepthVal)) * 0.5, 0, 1)
w1 = 1.0
w2 = 1.0
w3 = theta
return tf.reduce_mean((w1 * l_ssim) + (w2 * K.mean(l_edges)) + (w3 * K.mean(l_depth)))
| 9,917 | 45.345794 | 116 |
py
|
LightDepth
|
LightDepth-main/tf_implementation/scripts/models/ordinary_unet.py
|
from tensorflow.keras.layers import Layer, InputSpec
import tensorflow as tf
from tensorflow.keras import applications
from tensorflow.keras.layers import Conv2D, Concatenate, LeakyReLU, UpSampling2D
import keras.backend as K
import keras.utils.conv_utils as conv_utils
from tensorflow.keras.models import Model
import numpy as np
def normalize_data_format(value):
if value is None:
value = K.image_data_format()
data_format = value.lower()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('The `data_format` argument must be one of '
'"channels_first", "channels_last". Received: ' +
str(value))
return data_format
class BilinearUpSampling2D(Layer):
def __init__(self, size=(2, 2), data_format=None, **kwargs):
super(BilinearUpSampling2D, self).__init__(**kwargs)
self.data_format = normalize_data_format(data_format)
self.size = conv_utils.normalize_tuple(size, 2, 'size')
self.input_spec = InputSpec(ndim=4)
def compute_output_shape(self, input_shape):
if self.data_format == 'channels_first':
height = self.size[0] * input_shape[2] if input_shape[2] is not None else None
width = self.size[1] * input_shape[3] if input_shape[3] is not None else None
return (input_shape[0],
input_shape[1],
height,
width)
elif self.data_format == 'channels_last':
height = self.size[0] * input_shape[1] if input_shape[1] is not None else None
width = self.size[1] * input_shape[2] if input_shape[2] is not None else None
return (input_shape[0],
height,
width,
input_shape[3])
def call(self, inputs):
input_shape = K.shape(inputs)
if self.data_format == 'channels_first':
height = self.size[0] * input_shape[2] if input_shape[2] is not None else None
width = self.size[1] * input_shape[3] if input_shape[3] is not None else None
elif self.data_format == 'channels_last':
height = self.size[0] * input_shape[1] if input_shape[1] is not None else None
width = self.size[1] * input_shape[2] if input_shape[2] is not None else None
return tf.image.resize(inputs, [height, width], method=tf.image.ResizeMethod.BILINEAR)
def get_config(self):
config = {'size': self.size, 'data_format': self.data_format}
base_config = super(BilinearUpSampling2D, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
class OrdinaryUNet():
def __init__(self,config):
lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
config.initial_learning_rate,
decay_steps=config.decay_steps,
decay_rate=config.decay_rate)
self.optimizer = getattr(tf.optimizers,config.optimizer)(learning_rate=lr_schedule)
self.max_depth = tf.constant(config.max_depth)
self.min_depth = config.min_depth
self.model_loss = getattr(self,config.loss_fn)
self.garg_crop = config.garg_crop
self.eigen_crop = config.eigen_crop
self.do_flip_predict = config.do_flip_predict
self.eps = 1e-5
def UpConv2D(tensor, filters, name, concat_with):
up_i = BilinearUpSampling2D((2, 2), name=name+'_upsampling2d')(tensor)
up_i = Concatenate(name=name+'_concat')([up_i, encoder.get_layer(concat_with).output]) # Skip connection
up_i = Conv2D(filters=filters, kernel_size=3, strides=1, padding='same', name=name+'_convA')(up_i)
up_i = LeakyReLU(alpha=0.2)(up_i)
up_i = Conv2D(filters=filters, kernel_size=3, strides=1, padding='same', name=name+'_convB')(up_i)
up_i = LeakyReLU(alpha=0.2)(up_i)
return up_i
encoder = getattr(applications,config.encoder)(input_shape=(512, 256, 3), include_top=False)
encoder_output_shape = encoder.output.shape
decode_filters = int(encoder_output_shape[-1])
decoder = Conv2D(filters=decode_filters,
kernel_size=1, padding='same',
input_shape=encoder_output_shape,
name='conv2')(encoder.output)
decoder = UpConv2D(decoder, int(decode_filters/2),
'up1', concat_with='pool3_pool')
decoder = UpConv2D(decoder, int(decode_filters/4),
'up2', concat_with='pool2_pool')
decoder = UpConv2D(decoder, int(decode_filters/8),
'up3', concat_with='pool1')
decoder = UpConv2D(decoder, int(decode_filters/16),
'up4', concat_with='conv1/relu')
# decoder = UpConv2D(decoder, int(decode_filters/32),
# 'up5', concat_with=encoder.input.name)
outputs = Conv2D(filters=1, kernel_size=3, strides=1,
padding='same', name='conv3',activation=config.decoder_last_layer_activation_fn)(decoder)
outputs = UpSampling2D()(outputs)
if config.decoder_last_layer_activation_fn == 'sigmoid':
outputs=outputs*self.max_depth + self.eps
else:
outputs = outputs - tf.reduce_min(outputs)
outputs = outputs / tf.reduce_max(outputs)
outputs = (outputs*(self.max_depth-self.min_depth))+self.min_depth
self.model = Model(inputs=encoder.input, outputs=outputs)
@tf.function
def test_step(self,image,depth_gt):
depth_est = self.model(image, training=False)
loss_value = self.model_loss(depth_est, depth_gt)
return loss_value,depth_est
@tf.function
def train_step(self, image, depth_gt):
with tf.GradientTape() as tape:
depth_est = self.model(image, training=True)
loss_value = self.model_loss(depth_est, depth_gt)
grads = tape.gradient(loss_value, self.model.trainable_variables)
self.optimizer.apply_gradients(zip(grads, self.model.trainable_variables))
return loss_value,tf.reduce_max(depth_est),tf.reduce_min(depth_est)
def compute_metrics(self,image,depth_gt):
valid_mask = np.logical_and(depth_gt > self.min_depth,
depth_gt < self.max_depth)
if self.garg_crop or self.eigen_crop:
batches, gt_height, gt_width, channels = depth_gt.shape
eval_mask = np.zeros(valid_mask.shape)
if self.garg_crop:
eval_mask[:,int(0.40810811 * gt_height):int(0.99189189 * gt_height),
int(0.03594771 * gt_width):int(0.96405229 * gt_width),:] = 1
elif self.eigen_crop:
# if self.dataset == 'kitti':
eval_mask[:,int(0.3324324 * gt_height):int(0.91351351 * gt_height),
int(0.0359477 * gt_width):int(0.96405229 * gt_width),:] = 1
# else:
# eval_mask[:,45:471, 41:601,:] = 1
depth_est = self.model(image, training=False)
        if self.do_flip_predict:
            # Predict on the horizontally flipped image, flip the prediction back, then average
            depth_est_lr = self.model(image[..., ::-1, :], training=False)
            depth_est_final = (0.5 * (depth_est + depth_est_lr[..., ::-1, :]))[valid_mask]
else:
depth_est_final = depth_est[valid_mask]
depth_gt = depth_gt[valid_mask]
thresh = np.maximum((depth_gt / depth_est_final), (depth_est_final / depth_gt))
a1 = (thresh < 1.25).mean()
a2 = (thresh < 1.25 ** 2).mean()
a3 = (thresh < 1.25 ** 3).mean()
abs_rel = np.mean(np.abs(depth_gt - depth_est_final) / depth_gt)
sq_rel = np.mean(((depth_gt - depth_est_final) ** 2) / depth_gt)
rmse = (depth_gt - depth_est_final) ** 2
rmse = np.sqrt(np.mean(rmse))
rmse_log = (np.log(depth_gt) - np.log(depth_est_final)) ** 2
rmse_log = np.sqrt(rmse_log.mean())
err = np.log(depth_est_final) - np.log(depth_gt)
silog = np.sqrt(np.mean(err ** 2) - np.mean(err) ** 2) * 100
log_10 = (np.abs(np.log10(depth_gt) - np.log10(depth_est_final))).mean()
return dict(a1=a1, a2=a2, a3=a3,
abs_rel=abs_rel, rmse=rmse, log_10=log_10,
rmse_log=rmse_log, silog=silog, sq_rel=sq_rel)
@tf.function
def bts_loss(self,depth_est,depth_gt):
mask = depth_gt > self.min_depth
depth_gt_masked = tf.boolean_mask(depth_gt, mask)
depth_est_masked = tf.boolean_mask(depth_est, mask)
d = tf.math.log(depth_est_masked) - tf.math.log(depth_gt_masked)
return tf.sqrt(tf.reduce_mean(d ** 2) - 0.85 * (tf.reduce_mean(d) ** 2)) * 10.0
@tf.function
def kitti_loss(self,depth_est,depth_gt):
mask = depth_gt > self.min_depth
depth_gt_masked = tf.boolean_mask(depth_gt, mask)
depth_est_masked = tf.boolean_mask(depth_est, mask)
d = tf.math.log(depth_est_masked) - tf.math.log(depth_gt_masked)
return tf.reduce_mean(d ** 2) - (tf.reduce_mean(d) ** 2)
@tf.function
def densedepth_loss(self,depth_est, depth_gt, theta=0.1, maxDepthVal=1000.0/10.0):
l_depth = K.mean(K.abs(depth_est - depth_gt), axis=-1)
dy_true, dx_true = tf.image.image_gradients(depth_gt)
dy_pred, dx_pred = tf.image.image_gradients(depth_est)
l_edges = K.mean(K.abs(dy_pred - dy_true) + K.abs(dx_pred - dx_true), axis=-1)
l_ssim = K.clip((1 - tf.image.ssim(depth_gt, depth_est, maxDepthVal)) * 0.5, 0, 1)
w1 = 1.0
w2 = 1.0
w3 = theta
return tf.reduce_mean((w1 * l_ssim) + (w2 * K.mean(l_edges)) + (w3 * K.mean(l_depth)))
| 9,771 | 45.312796 | 116 |
py
|
dpkt
|
dpkt-master/setup.py
|
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
package_name = 'dpkt'
description = 'fast, simple packet creation / parsing, with definitions for the basic TCP/IP protocols'
readme = open('README.md').read()
requirements = []
# PyPI Readme
long_description = open('README.md').read()
# Pull in the package
package = __import__(package_name)
package_version = package.__version__
if "bdist_msi" in sys.argv:
# The MSI build target does not support a 4 digit version, e.g. '1.2.3.4'
# therefore we remove the last digit.
package_version, _, _ = package_version.rpartition('.')
setup(name=package_name,
version=package_version,
author=package.__author__,
author_email=package.__author_email__,
url=package.__url__,
description=description,
long_description=long_description,
long_description_content_type='text/markdown',
packages=['dpkt'],
install_requires=requirements,
license='BSD',
zip_safe=False,
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
])
| 1,701 | 33.04 | 103 |
py
|
dpkt
|
dpkt-master/examples/print_icmp.py
|
#!/usr/bin/env python
"""
This example expands on the print_packets example. It checks for ICMP packets and displays the ICMP contents.
"""
import dpkt
import datetime
from dpkt.utils import mac_to_str, inet_to_str
def print_icmp(pcap):
"""Print out information about each packet in a pcap
Args:
pcap: dpkt pcap reader object (dpkt.pcap.Reader)
"""
# For each packet in the pcap process the contents
for timestamp, buf in pcap:
# Unpack the Ethernet frame (mac src/dst, ethertype)
eth = dpkt.ethernet.Ethernet(buf)
# Make sure the Ethernet data contains an IP packet
if not isinstance(eth.data, dpkt.ip.IP):
print('Non IP Packet type not supported %s\n' % eth.data.__class__.__name__)
continue
# Now grab the data within the Ethernet frame (the IP packet)
ip = eth.data
# Now check if this is an ICMP packet
if isinstance(ip.data, dpkt.icmp.ICMP):
icmp = ip.data
# Pull out fragment information (flags and offset all packed into off field, so use bitmasks)
do_not_fragment = bool(ip.off & dpkt.ip.IP_DF)
more_fragments = bool(ip.off & dpkt.ip.IP_MF)
fragment_offset = ip.off & dpkt.ip.IP_OFFMASK
# Print out the info
print('Timestamp: ', str(datetime.datetime.utcfromtimestamp(timestamp)))
print('Ethernet Frame: ', mac_to_str(eth.src), mac_to_str(eth.dst), eth.type)
print('IP: %s -> %s (len=%d ttl=%d DF=%d MF=%d offset=%d)' %
(inet_to_str(ip.src), inet_to_str(ip.dst), ip.len, ip.ttl,
do_not_fragment, more_fragments, fragment_offset))
print('ICMP: type:%d code:%d checksum:%d data: %s\n' %
(icmp.type, icmp.code, icmp.sum, repr(icmp.data)))
def test():
"""Open up a test pcap file and print out the packets"""
with open('data/dns_icmp.pcap', 'rb') as f:
pcap = dpkt.pcap.Reader(f)
print_icmp(pcap)
if __name__ == '__main__':
test()
| 2,076 | 34.810345 | 109 |
py
|
dpkt
|
dpkt-master/examples/print_dns_truncated.py
|
"""
Use DPKT to read in a pcap file and print out the contents of truncated
DNS packets. This example show how to read/handle truncated packets
"""
import sys
import dpkt
import datetime
from dpkt.utils import mac_to_str, inet_to_str, make_dict
from pprint import pprint
def print_packet(buf):
"""Print out information about each packet in a pcap
Args:
buf: buffer of bytes for this packet
"""
print(type(buf))
# Unpack the Ethernet frame (mac src/dst, ethertype)
eth = dpkt.ethernet.Ethernet(buf)
print('Ethernet Frame: ', mac_to_str(eth.src), mac_to_str(eth.dst), eth.type)
# Make sure the Ethernet data contains an IP packet
if not isinstance(eth.data, dpkt.ip.IP):
print('Non IP Packet type not supported %s\n' % eth.data.__class__.__name__)
return
# Now unpack the data within the Ethernet frame (the IP packet)
# Pulling out src, dst, length, fragment info, TTL, and Protocol
ip = eth.data
# Pull out fragment information (flags and offset all packed into off field, so use bitmasks)
do_not_fragment = bool(ip.off & dpkt.ip.IP_DF)
more_fragments = bool(ip.off & dpkt.ip.IP_MF)
fragment_offset = ip.off & dpkt.ip.IP_OFFMASK
# Print out the info
print('IP: %s -> %s (len=%d ttl=%d DF=%d MF=%d offset=%d)' %
(inet_to_str(ip.src), inet_to_str(ip.dst), ip.len, ip.ttl, do_not_fragment,
more_fragments, fragment_offset))
# Check for UDP in the transport layer
if isinstance(ip.data, dpkt.udp.UDP):
# Set the UDP data
udp = ip.data
print('UDP: sport={:d} dport={:d} sum={:d} ulen={:d}'.format(udp.sport, udp.dport,
udp.sum, udp.ulen))
# Now see if we can parse the contents of the truncated DNS request
try:
dns = dpkt.dns.DNS()
dns.unpack(udp.data)
except (dpkt.dpkt.NeedData, dpkt.dpkt.UnpackError, Exception) as e:
print('\nError Parsing DNS, Might be a truncated packet...')
print('Exception: {!r}'.format(e))
# Print out the DNS info
print('Queries: {:d}'.format(len(dns.qd)))
for query in dns.qd:
print('\t {:s} Type:{:d}'.format(query.name, query.type))
print('Answers: {:d}'.format(len(dns.an)))
for answer in dns.an:
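            # dpkt.dns record types: 1 = A, 5 = CNAME; anything else is dumped as a dict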
if answer.type == 5:
print('\t {:s}: type: CNAME Answer: {:s}'.format(answer.name, answer.cname))
elif answer.type == 1:
print('\t {:s}: type: A Answer: {:s}'.format(answer.name, inet_to_str(answer.ip)))
else:
pprint(make_dict(answer))
def process_packets(pcap):
"""Process each packet in a pcap
Args:
pcap: dpkt pcap reader object (dpkt.pcap.Reader)
"""
# For each packet in the pcap process the contents
try:
for timestamp, buf in pcap:
# Print out the timestamp in UTC
print('Timestamp: ', str(datetime.datetime.utcfromtimestamp(timestamp)))
print_packet(buf)
except dpkt.dpkt.NeedData:
print('\nPCAP capture is truncated, stopping processing...')
sys.exit(1)
def test():
"""Open up a test pcap file and print out the packets"""
with open('data/truncated_dns_2.pcap', 'rb') as f:
pcap = dpkt.pcap.Reader(f)
process_packets(pcap)
if __name__ == '__main__':
test()
| 3,422 | 33.575758 | 97 |
py
|
dpkt
|
dpkt-master/examples/print_packets.py
|
"""
Use DPKT to read in a pcap file and print out the contents of the packets.
This example is focused on the fields in the Ethernet Frame and IP packet.
"""
import dpkt
import datetime
from dpkt.utils import mac_to_str, inet_to_str
def print_packets(pcap):
"""Print out information about each packet in a pcap
Args:
pcap: dpkt pcap reader object (dpkt.pcap.Reader)
"""
# For each packet in the pcap process the contents
for timestamp, buf in pcap:
# Print out the timestamp in UTC
print('Timestamp: ', str(datetime.datetime.utcfromtimestamp(timestamp)))
# Unpack the Ethernet frame (mac src/dst, ethertype)
eth = dpkt.ethernet.Ethernet(buf)
print('Ethernet Frame: ', mac_to_str(eth.src), mac_to_str(eth.dst), eth.type)
# Make sure the Ethernet data contains an IP packet
if not isinstance(eth.data, dpkt.ip.IP):
print('Non IP Packet type not supported %s\n' % eth.data.__class__.__name__)
continue
# Now access the data within the Ethernet frame (the IP packet)
# Pulling out src, dst, length, fragment info, TTL, and Protocol
ip = eth.data
# Print out the info, including the fragment flags and offset
print('IP: %s -> %s (len=%d ttl=%d DF=%d MF=%d offset=%d)\n' %
(inet_to_str(ip.src), inet_to_str(ip.dst), ip.len, ip.ttl, ip.df, ip.mf, ip.offset))
# Pretty print the last packet
print('** Pretty print demo **\n')
eth.pprint()
def test():
"""Open up a test pcap file and print out the packets"""
with open('data/http.pcap', 'rb') as f:
pcap = dpkt.pcap.Reader(f)
print_packets(pcap)
if __name__ == '__main__':
test()
| 1,744 | 31.924528 | 98 |
py
|
dpkt
|
dpkt-master/examples/__init__.py
| 0 | 0 | 0 |
py
|
|
dpkt
|
dpkt-master/examples/print_http_requests.py
|
"""
This example expands on the print_packets example. It checks for HTTP request headers and displays their contents.
NOTE: We are not reconstructing 'flows' so the request (and response if you tried to parse it) will only
parse correctly if they fit within a single packet. Requests can often fit in a single packet but
Responses almost never will. For proper reconstruction of flows you may want to look at other projects
that use DPKT (http://chains.readthedocs.io and others)
"""
import dpkt
import datetime
from dpkt.utils import mac_to_str, inet_to_str
def print_http_requests(pcap):
"""Print out information about each packet in a pcap
Args:
pcap: dpkt pcap reader object (dpkt.pcap.Reader)
"""
# For each packet in the pcap process the contents
for timestamp, buf in pcap:
# Unpack the Ethernet frame (mac src/dst, ethertype)
eth = dpkt.ethernet.Ethernet(buf)
# Make sure the Ethernet data contains an IP packet
if not isinstance(eth.data, dpkt.ip.IP):
print('Non IP Packet type not supported %s\n' % eth.data.__class__.__name__)
continue
# Now grab the data within the Ethernet frame (the IP packet)
ip = eth.data
# Check for TCP in the transport layer
if isinstance(ip.data, dpkt.tcp.TCP):
# Set the TCP data
tcp = ip.data
# Now see if we can parse the contents as a HTTP request
try:
request = dpkt.http.Request(tcp.data)
except (dpkt.dpkt.NeedData, dpkt.dpkt.UnpackError):
continue
# Pull out fragment information (flags and offset all packed into off field, so use bitmasks)
do_not_fragment = bool(ip.off & dpkt.ip.IP_DF)
more_fragments = bool(ip.off & dpkt.ip.IP_MF)
fragment_offset = ip.off & dpkt.ip.IP_OFFMASK
# Print out the info
print('Timestamp: ', str(datetime.datetime.utcfromtimestamp(timestamp)))
print('Ethernet Frame: ', mac_to_str(eth.src), mac_to_str(eth.dst), eth.type)
print('IP: %s -> %s (len=%d ttl=%d DF=%d MF=%d offset=%d)' %
(inet_to_str(ip.src), inet_to_str(ip.dst), ip.len, ip.ttl, do_not_fragment, more_fragments, fragment_offset))
print('HTTP request: %s\n' % repr(request))
            # Check for Header spanning across TCP segments
if not tcp.data.endswith(b'\r\n'):
print('\nHEADER TRUNCATED! Reassemble TCP segments!\n')
def test():
"""Open up a test pcap file and print out the packets"""
with open('data/http.pcap', 'rb') as f:
pcap = dpkt.pcap.Reader(f)
print_http_requests(pcap)
if __name__ == '__main__':
test()
| 2,796 | 38.394366 | 127 |
py
|
dpkt
|
dpkt-master/examples/old/dnsping.py
|
#!/usr/bin/env python
from __future__ import print_function
import random
import socket
import dpkt
import ping
class DNSPing(ping.Ping):
def __init__(self):
ping.Ping.__init__(self)
self.op.add_option('-z', dest='zone', type='string',
default=socket.gethostname().split('.', 1)[1],
help='Domain to formulate queries in')
self.op.add_option('-n', dest='hostname', type='string',
help='Query only for a given hostname')
self.op.add_option('-p', dest='port', type='int', default=53,
help='Remote DNS server port')
self.op.add_option('-R', dest='norecurse', action='store_true',
help='Disable recursive queries')
def open_sock(self, opts):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.connect((opts.ip, opts.port))
sock.settimeout(opts.wait)
return sock
def gen_ping(self, opts):
for i in range(opts.count):
dns = dpkt.dns.DNS(id=i)
if opts.norecurse:
dns.op &= ~dpkt.dns.DNS_RD
if not opts.hostname:
name = '%s.%s' % (str(random.random())[-6:], opts.zone)
else:
name = opts.hostname
dns.qd = [dpkt.dns.DNS.Q(name=name)]
yield str(dns)
def print_header(self, opts):
print('DNSPING %s:' % opts.ip, end='')
if opts.hostname:
print('Name: %s' % opts.hostname)
else:
print('Name: *.%s' % opts.zone)
def print_reply(self, opts, buf, rtt):
dns = dpkt.dns.DNS(buf)
print('%d bytes from %s: id=%d time=%.3f ms' %
(len(buf), opts.ip, dns.id, rtt * 1000))
if __name__ == '__main__':
DNSPing().main()
| 1,850 | 31.473684 | 73 |
py
|
dpkt
|
dpkt-master/examples/old/nbtping.py
|
#!/usr/bin/env python
import socket
from dpkt import netbios
import ping
class NBTPing(ping.Ping):
def __init__(self):
ping.Ping.__init__(self)
self.op.add_option('-p', dest='port', type='int', default=137,
help='Remote NetBIOS name server port')
def open_sock(self, opts):
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.connect((opts.ip, opts.port))
sock.settimeout(opts.wait)
return sock
def gen_ping(self, opts):
for i in range(opts.count):
ns = netbios.NS(id=i,
qd=[netbios.NS.Q(type=netbios.NS_NBSTAT, name='*')])
yield str(ns)
def print_header(self, opts):
print('NBTPING %s:' % opts.ip)
def print_reply(self, opts, buf, rtt):
ns = netbios.NS(buf)
d = {}
for rr in ns.an:
for name, svc, flags in rr.nodenames:
unique = (flags & netbios.NS_NAME_G == 0)
if svc == 0 and unique and 'host' not in d:
d['host'] = name
elif svc == 0x03 and unique:
if 'user' not in d or d['user'].startswith(d['host']):
d['user'] = name
print('%d bytes from %s: id=%d time=%.3f ms host=%s user=%s' %
(len(buf), opts.ip, ns.id, rtt * 1000,
d.get('host', ''), d.get('user', '')))
if __name__ == '__main__':
NBTPing().main()
| 1,480 | 30.510638 | 80 |
py
|
dpkt
|
dpkt-master/examples/old/ping.py
|
#!/usr/bin/env python
import math
import optparse
import random
import socket
import sys
import time
import dpkt
class Ping(object):
def __init__(self):
usage = '%prog [OPTIONS] <host>'
self.op = optparse.OptionParser(usage=usage)
self.op.add_option('-c', dest='count', type='int', default=sys.maxint,
help='Total number of queries to send')
self.op.add_option('-i', dest='wait', type='float', default=1,
help='Specify packet interval timeout in seconds')
def gen_ping(self, opts):
pass
def open_sock(self, opts):
pass
def print_header(self, opts):
pass
def print_reply(self, opts, buf, rtt):
pass
def main(self, argv=None):
if not argv:
argv = sys.argv[1:]
opts, args = self.op.parse_args(argv)
if not args:
self.op.error('missing host')
elif len(args) > 1:
self.op.error('only one host may be specified')
host = args[0]
opts.ip = socket.gethostbyname(host)
sock = self.open_sock(opts)
sent = rcvd = rtt_max = rtt_sum = rtt_sumsq = 0
rtt_min = 0xffff
try:
self.print_header(opts)
for ping in self.gen_ping(opts):
try:
start = time.time()
sock.send(ping)
buf = sock.recv(0xffff)
rtt = time.time() - start
if rtt < rtt_min:
rtt_min = rtt
if rtt > rtt_max:
rtt_max = rtt
rtt_sum += rtt
rtt_sumsq += rtt * rtt
self.print_reply(opts, buf, rtt)
rcvd += 1
except socket.timeout:
pass
sent += 1
time.sleep(opts.wait)
except KeyboardInterrupt:
pass
print('\n--- %s ping statistics ---' % opts.ip)
print('%d packets transmitted, %d packets received, %.1f%% packet loss' %
(sent, rcvd, (float(sent - rcvd) / sent) * 100))
rtt_avg = rtt_sum / sent
if rtt_min == 0xffff:
rtt_min = 0
print('round-trip min/avg/max/std-dev = %.3f/%.3f/%.3f/%.3f ms' %
(rtt_min * 1000, rtt_avg * 1000, rtt_max * 1000,
math.sqrt((rtt_sumsq / sent) - (rtt_avg * rtt_avg)) * 1000))
class ICMPPing(Ping):
def __init__(self):
Ping.__init__(self)
self.op.add_option('-p', dest='payload', type='string',
default='hello world!',
help='Echo payload string')
def open_sock(self, opts):
sock = socket.socket(socket.AF_INET, socket.SOCK_RAW, 1)
sock.connect((opts.ip, 1))
sock.settimeout(opts.wait)
return sock
def gen_ping(self, opts):
for i in range(opts.count):
icmp = dpkt.icmp.ICMP(
type=8, data=dpkt.icmp.ICMP.Echo(id=random.randint(0, 0xffff),
seq=i, data=opts.payload))
yield str(icmp)
def print_header(self, opts):
print('PING %s: %d data bytes' % (opts.ip, len(opts.payload)))
def print_reply(self, opts, buf, rtt):
ip = dpkt.ip.IP(buf)
if sys.platform == 'darwin':
# XXX - work around raw socket bug on MacOS X
ip.data = ip.icmp = dpkt.icmp.ICMP(buf[20:])
ip.len = len(ip.data)
print('%d bytes from %s: icmp_seq=%d ip_id=%d ttl=%d time=%.3f ms' %
(len(ip.icmp), opts.ip, ip.icmp.echo.seq, ip.id, ip.ttl,
rtt * 1000))
if __name__ == '__main__':
p = ICMPPing()
p.main()
| 3,810 | 30.237705 | 81 |
py
|
dpkt
|
dpkt-master/examples/old/dhcprequest.py
|
#!/usr/bin/env python
from __future__ import print_function
import sys
import socket
# Since pcapy is not a requirement of dpkt, test the import and give message
try:
import pcapy
except ImportError:
print('Could not import pcapy. Please do a $pip install pcapy')
sys.exit(1)
# dpkt imports
from dpkt import dhcp
from dpkt import udp
from dpkt import ip
from dpkt import ethernet
# Grab the default interface and use that for the injection
devices = pcapy.findalldevs()
iface_name = devices[0]
print('Auto Setting Interface to: {:s}'.format(iface_name))
interface = pcapy.open_live(iface_name, 65536, 1, 0)
# Get local ip
src_ip = socket.inet_pton(socket.AF_INET, interface.getnet())
# Generate broadcast ip and eth_addr
broadcast_ip = socket.inet_pton(socket.AF_INET, '255.255.255.255')
broadcast_eth_addr = b'\xFF\xFF\xFF\xFF\xFF\xFF'
# build a dhcp discover packet to request an ip
d = dhcp.DHCP(
xid=1337,
op=dhcp.DHCPDISCOVER,
opts=(
(dhcp.DHCP_OP_REQUEST, b''),
(dhcp.DHCP_OPT_REQ_IP, b''),
(dhcp.DHCP_OPT_ROUTER, b''),
(dhcp.DHCP_OPT_NETMASK, b''),
(dhcp.DHCP_OPT_DNS_SVRS, b'')
)
)
# build udp packet
u = udp.UDP(
dport=67,
sport=68,
data=d
)
u.ulen = len(u)
# build ip packet
i = ip.IP(
dst=broadcast_ip,
src=src_ip,
data=u,
p=ip.IP_PROTO_UDP
)
i.len = len(i)
# build ethernet frame
e = ethernet.Ethernet(
dst=broadcast_eth_addr,
data=i
)
# Inject the packet (send it out)
interface.sendpacket(bytes(e))
print('DHCP request sent!')
| 1,557 | 20.342466 | 76 |
py
|
dpkt
|
dpkt-master/dpkt/arp.py
|
# $Id: arp.py 23 2006-11-08 15:45:33Z dugsong $
# -*- coding: utf-8 -*-
"""Address Resolution Protocol."""
from __future__ import absolute_import
from . import dpkt
# Hardware address format
ARP_HRD_ETH = 0x0001 # ethernet hardware
ARP_HRD_IEEE802 = 0x0006 # IEEE 802 hardware
# Protocol address format
ARP_PRO_IP = 0x0800 # IP protocol
# ARP operation
ARP_OP_REQUEST = 1 # request to resolve ha given pa
ARP_OP_REPLY = 2 # response giving hardware address
ARP_OP_REVREQUEST = 3 # request to resolve pa given ha
ARP_OP_REVREPLY = 4 # response giving protocol address
class ARP(dpkt.Packet):
"""Address Resolution Protocol.
    See more about ARP on
    https://en.wikipedia.org/wiki/Address_Resolution_Protocol
Attributes:
__hdr__: Header fields of ARP.
"""
__hdr__ = (
('hrd', 'H', ARP_HRD_ETH),
('pro', 'H', ARP_PRO_IP),
('hln', 'B', 6), # hardware address length
('pln', 'B', 4), # protocol address length
('op', 'H', ARP_OP_REQUEST),
('sha', '6s', b''),
('spa', '4s', b''),
('tha', '6s', b''),
('tpa', '4s', b'')
)
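# A usage sketch (illustrative hardware/protocol addresses, not from a real
# capture): build an ARP who-has request and round-trip it through bytes().
def test_arp_request():
    req = ARP(
        op=ARP_OP_REQUEST,
        sha=b'\x00\x11\x22\x33\x44\x55', spa=b'\xc0\xa8\x01\x01',
        tha=b'\x00\x00\x00\x00\x00\x00', tpa=b'\xc0\xa8\x01\x02')
    buf = bytes(req)
    assert len(buf) == 28 # fixed-size header, no payload
    parsed = ARP(buf)
    assert parsed.op == ARP_OP_REQUEST
    assert parsed.hrd == ARP_HRD_ETH and parsed.pro == ARP_PRO_IP
    assert parsed.spa == b'\xc0\xa8\x01\x01' and parsed.tpa == b'\xc0\xa8\x01\x02'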
| 1,143 | 25.604651 | 61 |
py
|
dpkt
|
dpkt-master/dpkt/ip.py
|
# $Id: ip.py 87 2013-03-05 19:41:04Z [email protected] $
# -*- coding: utf-8 -*-
"""Internet Protocol."""
from __future__ import print_function
from __future__ import absolute_import
from . import dpkt
from .compat import iteritems
from .utils import inet_to_str, deprecation_warning
_ip_proto_names = {} # {1: 'ICMP', 6: 'TCP', 17: 'UDP', etc.}
def get_ip_proto_name(p):
return _ip_proto_names.get(p, None)
class IP(dpkt.Packet):
"""Internet Protocol.
The Internet Protocol (IP) is the network layer communications protocol in the Internet protocol suite
for relaying datagrams across network boundaries. Its routing function enables internetworking, and
essentially establishes the Internet.
Attributes:
__hdr__: Header fields of IP.
_v_hl:
v: (int): Version (4 bits) For IPv4, this is always equal to 4
hl: (int): Internet Header Length (IHL) (4 bits)
_flags_offset:
rf: (int): Reserved bit (1 bit)
df: (int): Don't fragment (1 bit)
mf: (int): More fragments (1 bit)
offset: (int): Fragment offset (13 bits)
tos: (int): Type of service. (1 byte)
len: (int): Total Length. Defines the entire packet size in bytes, including header and data.(2 bytes)
id: (int): Identification. Uniquely identifying the group of fragments of a single IP datagram. (2 bytes)
ttl: (int): Time to live (1 byte)
p: (int): Protocol. This field defines the protocol used in the data portion of the IP datagram. (1 byte)
sum: (int): Header checksum. (2 bytes)
src: (int): Source address. This field is the IPv4 address of the sender of the packet. (4 bytes)
dst: (int): Destination address. This field is the IPv4 address of the receiver of the packet. (4 bytes)
"""
__hdr__ = (
('_v_hl', 'B', (4 << 4) | (20 >> 2)),
('tos', 'B', 0),
('len', 'H', 20),
('id', 'H', 0),
('_flags_offset', 'H', 0), # XXX - previously ip.off
('ttl', 'B', 64),
('p', 'B', 0),
('sum', 'H', 0),
('src', '4s', b'\x00' * 4),
('dst', '4s', b'\x00' * 4)
)
__bit_fields__ = {
'_v_hl': (
('v', 4), # version, 4 bits
('hl', 4), # header len, 4 bits
),
'_flags_offset': (
('rf', 1), # reserved bit
('df', 1), # don't fragment
('mf', 1), # more fragments
('offset', 13), # fragment offset, 13 bits
)
}
__pprint_funcs__ = {
'dst': inet_to_str,
'src': inet_to_str,
'sum': hex, # display checksum in hex
'p': get_ip_proto_name
}
_protosw = {}
opts = b''
def __init__(self, *args, **kwargs):
super(IP, self).__init__(*args, **kwargs)
# If IP packet is not initialized by string and the len field has
# been rewritten.
if not args and 'len' not in kwargs:
self.len = self.__len__()
def __len__(self):
return self.__hdr_len__ + len(self.opts) + len(self.data)
def __bytes__(self):
if self.sum == 0:
self.len = self.__len__()
self.sum = dpkt.in_cksum(self.pack_hdr() + bytes(self.opts))
if (self.p == 6 or self.p == 17) and (self._flags_offset & (IP_MF | IP_OFFMASK)) == 0 and \
isinstance(self.data, dpkt.Packet) and self.data.sum == 0:
# Set zeroed TCP and UDP checksums for non-fragments.
p = bytes(self.data)
s = dpkt.struct.pack('>4s4sxBH', self.src, self.dst, self.p, len(p))
s = dpkt.in_cksum_add(0, s)
s = dpkt.in_cksum_add(s, p)
self.data.sum = dpkt.in_cksum_done(s)
# RFC 768 (Fields):
# If the computed checksum is zero, it is transmitted as all
# ones (the equivalent in one's complement arithmetic). An all
# zero transmitted checksum value means that the transmitter
# generated no checksum (for debugging or for higher level
# protocols that don't care).
if self.p == 17 and self.data.sum == 0:
self.data.sum = 0xffff
# XXX - skip transports which don't need the pseudoheader
return self.pack_hdr() + bytes(self.opts) + bytes(self.data)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
ol = ((self._v_hl & 0xf) << 2) - self.__hdr_len__
if ol < 0:
raise dpkt.UnpackError('invalid header length')
self.opts = buf[self.__hdr_len__:self.__hdr_len__ + ol]
if self.len:
buf = buf[self.__hdr_len__ + ol:self.len]
else: # very likely due to TCP segmentation offload
buf = buf[self.__hdr_len__ + ol:]
try:
self.data = self._protosw[self.p](buf) if self.offset == 0 else buf
setattr(self, self.data.__class__.__name__.lower(), self.data)
except (KeyError, dpkt.UnpackError):
self.data = buf
@classmethod
def set_proto(cls, p, pktclass):
cls._protosw[p] = pktclass
@classmethod
def get_proto(cls, p):
return cls._protosw[p]
# XXX - compatibility; to be deprecated
@property
def off(self):
deprecation_warning("IP.off is deprecated")
return self._flags_offset
@off.setter
def off(self, val):
deprecation_warning("IP.off is deprecated")
self.offset = val
# IP Headers
IP_ADDR_LEN = 0x04
IP_ADDR_BITS = 0x20
IP_HDR_LEN = 0x14
IP_OPT_LEN = 0x02
IP_OPT_LEN_MAX = 0x28
IP_HDR_LEN_MAX = IP_HDR_LEN + IP_OPT_LEN_MAX
IP_LEN_MAX = 0xffff
IP_LEN_MIN = IP_HDR_LEN
# Reserved Addresses
IP_ADDR_ANY = "\x00\x00\x00\x00" # 0.0.0.0
IP_ADDR_BROADCAST = "\xff\xff\xff\xff" # 255.255.255.255
IP_ADDR_LOOPBACK = "\x7f\x00\x00\x01" # 127.0.0.1
IP_ADDR_MCAST_ALL = "\xe0\x00\x00\x01" # 224.0.0.1
IP_ADDR_MCAST_LOCAL = "\xe0\x00\x00\xff" # 224.0.0.255
# Type of service (ip_tos), RFC 1349 ("obsoleted by RFC 2474")
IP_TOS_DEFAULT = 0x00 # default
IP_TOS_LOWDELAY = 0x10 # low delay
IP_TOS_THROUGHPUT = 0x08 # high throughput
IP_TOS_RELIABILITY = 0x04 # high reliability
IP_TOS_LOWCOST = 0x02 # low monetary cost - XXX
IP_TOS_ECT = 0x02 # ECN-capable transport
IP_TOS_CE = 0x01 # congestion experienced
# IP precedence (high 3 bits of ip_tos), hopefully unused
IP_TOS_PREC_ROUTINE = 0x00
IP_TOS_PREC_PRIORITY = 0x20
IP_TOS_PREC_IMMEDIATE = 0x40
IP_TOS_PREC_FLASH = 0x60
IP_TOS_PREC_FLASHOVERRIDE = 0x80
IP_TOS_PREC_CRITIC_ECP = 0xa0
IP_TOS_PREC_INTERNETCONTROL = 0xc0
IP_TOS_PREC_NETCONTROL = 0xe0
# Fragmentation flags (ip_off)
IP_RF = 0x8000 # reserved
IP_DF = 0x4000 # don't fragment
IP_MF = 0x2000 # more fragments (not last frag)
IP_OFFMASK = 0x1fff # mask for fragment offset
# Time-to-live (ip_ttl), seconds
IP_TTL_DEFAULT = 64 # default ttl, RFC 1122, RFC 1340
IP_TTL_MAX = 255 # maximum ttl
# Protocol (ip_p) - http://www.iana.org/assignments/protocol-numbers
IP_PROTO_IP = 0 # dummy for IP
IP_PROTO_HOPOPTS = IP_PROTO_IP # IPv6 hop-by-hop options
IP_PROTO_ICMP = 1 # ICMP
IP_PROTO_IGMP = 2 # IGMP
IP_PROTO_GGP = 3 # gateway-gateway protocol
IP_PROTO_IPIP = 4 # IP in IP
IP_PROTO_ST = 5 # ST datagram mode
IP_PROTO_TCP = 6 # TCP
IP_PROTO_CBT = 7 # CBT
IP_PROTO_EGP = 8 # exterior gateway protocol
IP_PROTO_IGP = 9 # interior gateway protocol
IP_PROTO_BBNRCC = 10 # BBN RCC monitoring
IP_PROTO_NVP = 11 # Network Voice Protocol
IP_PROTO_PUP = 12 # PARC universal packet
IP_PROTO_ARGUS = 13 # ARGUS
IP_PROTO_EMCON = 14 # EMCON
IP_PROTO_XNET = 15 # Cross Net Debugger
IP_PROTO_CHAOS = 16 # Chaos
IP_PROTO_UDP = 17 # UDP
IP_PROTO_MUX = 18 # multiplexing
IP_PROTO_DCNMEAS = 19 # DCN measurement
IP_PROTO_HMP = 20 # Host Monitoring Protocol
IP_PROTO_PRM = 21 # Packet Radio Measurement
IP_PROTO_IDP = 22 # Xerox NS IDP
IP_PROTO_TRUNK1 = 23 # Trunk-1
IP_PROTO_TRUNK2 = 24 # Trunk-2
IP_PROTO_LEAF1 = 25 # Leaf-1
IP_PROTO_LEAF2 = 26 # Leaf-2
IP_PROTO_RDP = 27 # "Reliable Datagram" proto
IP_PROTO_IRTP = 28 # Inet Reliable Transaction
IP_PROTO_TP = 29 # ISO TP class 4
IP_PROTO_NETBLT = 30 # Bulk Data Transfer
IP_PROTO_MFPNSP = 31 # MFE Network Services
IP_PROTO_MERITINP = 32 # Merit Internodal Protocol
IP_PROTO_SEP = 33 # Sequential Exchange proto
IP_PROTO_3PC = 34 # Third Party Connect proto
IP_PROTO_IDPR = 35 # Interdomain Policy Route
IP_PROTO_XTP = 36 # Xpress Transfer Protocol
IP_PROTO_DDP = 37 # Datagram Delivery Proto
IP_PROTO_CMTP = 38 # IDPR Ctrl Message Trans
IP_PROTO_TPPP = 39 # TP++ Transport Protocol
IP_PROTO_IL = 40 # IL Transport Protocol
IP_PROTO_IP6 = 41 # IPv6
IP_PROTO_SDRP = 42 # Source Demand Routing
IP_PROTO_ROUTING = 43 # IPv6 routing header
IP_PROTO_FRAGMENT = 44 # IPv6 fragmentation header
IP_PROTO_RSVP = 46 # Reservation protocol
IP_PROTO_GRE = 47 # General Routing Encap
IP_PROTO_MHRP = 48 # Mobile Host Routing
IP_PROTO_ENA = 49 # ENA
IP_PROTO_ESP = 50 # Encap Security Payload
IP_PROTO_AH = 51 # Authentication Header
IP_PROTO_INLSP = 52 # Integrated Net Layer Sec
IP_PROTO_SWIPE = 53 # SWIPE
IP_PROTO_NARP = 54 # NBMA Address Resolution
IP_PROTO_MOBILE = 55 # Mobile IP, RFC 2004
IP_PROTO_TLSP = 56 # Transport Layer Security
IP_PROTO_SKIP = 57 # SKIP
IP_PROTO_ICMP6 = 58 # ICMP for IPv6
IP_PROTO_NONE = 59 # IPv6 no next header
IP_PROTO_DSTOPTS = 60 # IPv6 destination options
IP_PROTO_ANYHOST = 61 # any host internal proto
IP_PROTO_CFTP = 62 # CFTP
IP_PROTO_ANYNET = 63 # any local network
IP_PROTO_EXPAK = 64 # SATNET and Backroom EXPAK
IP_PROTO_KRYPTOLAN = 65 # Kryptolan
IP_PROTO_RVD = 66 # MIT Remote Virtual Disk
IP_PROTO_IPPC = 67 # Inet Pluribus Packet Core
IP_PROTO_DISTFS = 68 # any distributed fs
IP_PROTO_SATMON = 69 # SATNET Monitoring
IP_PROTO_VISA = 70 # VISA Protocol
IP_PROTO_IPCV = 71 # Inet Packet Core Utility
IP_PROTO_CPNX = 72 # Comp Proto Net Executive
IP_PROTO_CPHB = 73 # Comp Protocol Heart Beat
IP_PROTO_WSN = 74 # Wang Span Network
IP_PROTO_PVP = 75 # Packet Video Protocol
IP_PROTO_BRSATMON = 76 # Backroom SATNET Monitor
IP_PROTO_SUNND = 77 # SUN ND Protocol
IP_PROTO_WBMON = 78 # WIDEBAND Monitoring
IP_PROTO_WBEXPAK = 79 # WIDEBAND EXPAK
IP_PROTO_EON = 80 # ISO CNLP
IP_PROTO_VMTP = 81 # Versatile Msg Transport
IP_PROTO_SVMTP = 82 # Secure VMTP
IP_PROTO_VINES = 83 # VINES
IP_PROTO_TTP = 84 # TTP
IP_PROTO_NSFIGP = 85 # NSFNET-IGP
IP_PROTO_DGP = 86 # Dissimilar Gateway Proto
IP_PROTO_TCF = 87 # TCF
IP_PROTO_EIGRP = 88 # EIGRP
IP_PROTO_OSPF = 89 # Open Shortest Path First
IP_PROTO_SPRITERPC = 90 # Sprite RPC Protocol
IP_PROTO_LARP = 91 # Locus Address Resolution
IP_PROTO_MTP = 92 # Multicast Transport Proto
IP_PROTO_AX25 = 93 # AX.25 Frames
IP_PROTO_IPIPENCAP = 94 # yet-another IP encap
IP_PROTO_MICP = 95 # Mobile Internet Ctrl
IP_PROTO_SCCSP = 96 # Semaphore Comm Sec Proto
IP_PROTO_ETHERIP = 97 # Ethernet in IPv4
IP_PROTO_ENCAP = 98 # encapsulation header
IP_PROTO_ANYENC = 99 # private encryption scheme
IP_PROTO_GMTP = 100 # GMTP
IP_PROTO_IFMP = 101 # Ipsilon Flow Mgmt Proto
IP_PROTO_PNNI = 102 # PNNI over IP
IP_PROTO_PIM = 103 # Protocol Indep Multicast
IP_PROTO_ARIS = 104 # ARIS
IP_PROTO_SCPS = 105 # SCPS
IP_PROTO_QNX = 106 # QNX
IP_PROTO_AN = 107 # Active Networks
IP_PROTO_IPCOMP = 108 # IP Payload Compression
IP_PROTO_SNP = 109 # Sitara Networks Protocol
IP_PROTO_COMPAQPEER = 110 # Compaq Peer Protocol
IP_PROTO_IPXIP = 111 # IPX in IP
IP_PROTO_VRRP = 112 # Virtual Router Redundancy
IP_PROTO_PGM = 113 # PGM Reliable Transport
IP_PROTO_ANY0HOP = 114 # 0-hop protocol
IP_PROTO_L2TP = 115 # Layer 2 Tunneling Proto
IP_PROTO_DDX = 116 # D-II Data Exchange (DDX)
IP_PROTO_IATP = 117 # Interactive Agent Xfer
IP_PROTO_STP = 118 # Schedule Transfer Proto
IP_PROTO_SRP = 119 # SpectraLink Radio Proto
IP_PROTO_UTI = 120 # UTI
IP_PROTO_SMP = 121 # Simple Message Protocol
IP_PROTO_SM = 122 # SM
IP_PROTO_PTP = 123 # Performance Transparency
IP_PROTO_ISIS = 124 # ISIS over IPv4
IP_PROTO_FIRE = 125 # FIRE
IP_PROTO_CRTP = 126 # Combat Radio Transport
IP_PROTO_CRUDP = 127 # Combat Radio UDP
IP_PROTO_SSCOPMCE = 128 # SSCOPMCE
IP_PROTO_IPLT = 129 # IPLT
IP_PROTO_SPS = 130 # Secure Packet Shield
IP_PROTO_PIPE = 131 # Private IP Encap in IP
IP_PROTO_SCTP = 132 # Stream Ctrl Transmission
IP_PROTO_FC = 133 # Fibre Channel
IP_PROTO_RSVPIGN = 134 # RSVP-E2E-IGNORE
IP_PROTO_RAW = 255 # Raw IP packets
IP_PROTO_RESERVED = IP_PROTO_RAW # Reserved
IP_PROTO_MAX = 255
# XXX - auto-load IP dispatch table from IP_PROTO_* definitions
def __load_protos():
g = globals()
for k, v in iteritems(g):
if k.startswith('IP_PROTO_'):
name = k[9:]
_ip_proto_names[v] = name
try:
mod = __import__(name.lower(), g, level=1)
IP.set_proto(v, getattr(mod, name))
except (ImportError, AttributeError):
continue
def _mod_init():
"""Post-initialization called when all dpkt modules are fully loaded"""
if not IP._protosw:
__load_protos()
def test_ip():
from . import udp
s = b'E\x00\x00"\x00\x00\x00\x00@\x11r\xc0\x01\x02\x03\x04\x01\x02\x03\x04\x00o\x00\xde\x00\x0e\xbf5foobar'
ip = IP(id=0, src=b'\x01\x02\x03\x04', dst=b'\x01\x02\x03\x04', p=17)
u = udp.UDP(sport=111, dport=222)
u.data = b'foobar'
u.ulen += len(u.data)
ip.data = u
ip.len += len(u)
assert (bytes(ip) == s)
assert (ip.v == 4)
assert (ip.hl == 5)
ip = IP(s)
assert (bytes(ip) == s)
assert (ip.udp.sport == 111)
assert (ip.udp.data == b'foobar')
def test_dict():
ip = IP(id=0, src=b'\x01\x02\x03\x04', dst=b'\x01\x02\x03\x04', p=17)
d = dict(ip)
assert (d['src'] == b'\x01\x02\x03\x04')
assert (d['dst'] == b'\x01\x02\x03\x04')
assert (d['id'] == 0)
assert (d['p'] == 17)
def test_hl(): # Todo check this test method
s = (b'BB\x03\x00\x00\x00\x00\x00\x00\x00\xd0\x00\xec\xbc\xa5\x00\x00\x00\x03\x80\x00\x00\xd0'
b'\x01\xf2\xac\xa5"0\x01\x00\x14\x00\x02\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00')
try:
IP(s)
except dpkt.UnpackError:
pass
def test_opt():
s = (b'\x4f\x00\x00\x3c\xae\x08\x00\x00\x40\x06\x18\x10\xc0\xa8\x0a\x26\xc0\xa8\x0a\x01\x07\x27'
b'\x08\x01\x02\x03\x04\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')
ip = IP(s)
ip.sum = 0
assert (bytes(ip) == s)
def test_iplen():
# ensure the ip.len is not affected by serialization
# https://github.com/kbandla/dpkt/issues/279 , https://github.com/kbandla/dpkt/issues/625
s = (b'\x45\x00\x00\xa3\xb6\x7a\x00\x00\x80\x11\x5e\x6f\xc0\xa8\x03\x02\x97\x47\xca\x6e\xc7\x38'
b'\x64\xdf\x00\x8f\x4a\xfa\x26\xd8\x15\xbe\xd9\x42\xae\x66\xe1\xce\x14\x5f\x06\x79\x4b\x13'
b'\x02\xad\xa4\x8b\x69\x1c\x7a\xf6\xd5\x3d\x45\xaa\xba\xcd\x24\x77\xc2\xe7\x5f\x6a\xcc\xb5'
b'\x1f\x21\xfa\x62\xf0\xf3\x32\xe1\xe4\xf0\x20\x1f\x47\x61\xec\xbc\xb1\x0e\x6c\xf0\xb8\x6d'
b'\x7f\x96\x9b\x35\x03\xa1\x79\x05\xc5\xfd\x2a\xf7\xfa\x35\xe3\x0e\x04\xd0\xc7\x4e\x94\x72'
b'\x3d\x07\x5a\xa8\x53\x2a\x5d\x03\xf7\x04\xc4\xa8\xb8\xa1')
ip_len1 = IP(s).len # original len
assert (IP(bytes(IP(s))).len == ip_len1)
def test_zerolen():
from . import tcp
d = b'X' * 2048
s = (b'\x45\x00\x00\x00\x34\xce\x40\x00\x80\x06\x00\x00\x7f\x00\x00\x01\x7f\x00\x00\x01\xcc\x4e'
b'\x0c\x38\x60\xff\xc6\x4e\x5f\x8a\x12\x98\x50\x18\x40\x29\x3a\xa3\x00\x00') + d
ip = IP(s)
assert (isinstance(ip.data, tcp.TCP))
assert (ip.tcp.data == d)
def test_constructor():
ip1 = IP(data=b"Hello world!")
ip2 = IP(data=b"Hello world!", len=0)
ip3 = IP(bytes(ip1))
ip4 = IP(bytes(ip2))
assert (bytes(ip1) == bytes(ip3))
assert (bytes(ip1) == b'E\x00\x00 \x00\x00\x00\x00@\x00z\xdf\x00\x00\x00\x00\x00\x00\x00\x00Hello world!')
assert (bytes(ip2) == bytes(ip4))
assert (bytes(ip2) == b'E\x00\x00 \x00\x00\x00\x00@\x00z\xdf\x00\x00\x00\x00\x00\x00\x00\x00Hello world!')
def test_frag():
from . import ethernet
s = (b'\x00\x23\x20\xd4\x2a\x8c\x00\x23\x20\xd4\x2a\x8c\x08\x00\x45\x00\x00\x54\x00\x00\x40\x00'
b'\x40\x01\x25\x8d\x0a\x00\x00\x8f\x0a\x00\x00\x8e\x08\x00\x2e\xa0\x01\xff\x23\x73\x20\x48'
b'\x4a\x4d\x00\x00\x00\x00\x78\x85\x02\x00\x00\x00\x00\x00\x10\x11\x12\x13\x14\x15\x16\x17'
b'\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f\x20\x21\x22\x23\x24\x25\x26\x27\x28\x29\x2a\x2b\x2c\x2d'
b'\x2e\x2f\x30\x31\x32\x33\x34\x35\x36\x37')
ip = ethernet.Ethernet(s).ip
assert (ip.rf == 0)
assert (ip.df == 1)
assert (ip.mf == 0)
assert (ip.offset == 0)
# test setters of fragmentation related attributes.
ip.rf = 1
ip.df = 0
ip.mf = 1
ip.offset = 1480
assert (ip.rf == 1)
assert (ip.df == 0)
assert (ip.mf == 1)
assert (ip.offset == 1480)
def test_property_setters():
ip = IP()
assert ip.v == 4
ip.v = 6
assert ip.v == 6
# test property delete
del ip.v
assert ip.v == 4 # back to default
assert ip.hl == 5
ip.hl = 7
assert ip.hl == 7
del ip.hl
assert ip.hl == 5
# coverage
ip.off = 10
assert ip.off == 10
def test_default_udp_checksum():
from dpkt.udp import UDP
udp = UDP(sport=1, dport=0xffdb)
ip = IP(src=b'\x00\x00\x00\x01', dst=b'\x00\x00\x00\x01', p=17, data=udp)
assert ip.p == 17
assert ip.data.sum == 0
# this forces recalculation of the data layer checksum
bytes(ip)
# during calculation the checksum was evaluated to 0x0000
# this was then conditionally set to 0xffff per RFC768
assert ip.data.sum == 0xffff
def test_get_proto_name():
assert get_ip_proto_name(6) == 'TCP'
assert get_ip_proto_name(999) is None
| 18,117 | 34.877228 | 117 |
py
|
dpkt
|
dpkt-master/dpkt/aim.py
|
# $Id: aim.py 23 2006-11-08 15:45:33Z dugsong $
# -*- coding: utf-8 -*-
"""AOL Instant Messenger."""
from __future__ import absolute_import
import struct
from . import dpkt
# OSCAR: http://iserverd1.khstu.ru/oscar/
class FLAP(dpkt.Packet):
"""Frame Layer Protocol.
See more about the FLAP on
https://en.wikipedia.org/wiki/OSCAR_protocol#FLAP_header
Attributes:
__hdr__: Header fields of FLAP.
data: Message data.
"""
__hdr__ = (
('ast', 'B', 0x2a), # '*'
('type', 'B', 0),
('seq', 'H', 0),
('len', 'H', 0)
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
if self.ast != 0x2a:
raise dpkt.UnpackError('invalid FLAP header')
if len(self.data) < self.len:
raise dpkt.NeedData('%d left, %d needed' % (len(self.data), self.len))
class SNAC(dpkt.Packet):
"""Simple Network Atomic Communication.
See more about the SNAC on
https://en.wikipedia.org/wiki/OSCAR_protocol#SNAC_data
Attributes:
__hdr__: Header fields of SNAC.
"""
__hdr__ = (
('family', 'H', 0),
('subtype', 'H', 0),
('flags', 'H', 0),
('reqid', 'I', 0)
)
def tlv(buf):
n = 4
try:
t, l_ = struct.unpack('>HH', buf[:n])
except struct.error:
raise dpkt.UnpackError('invalid type, length fields')
v = buf[n:n + l_]
if len(v) < l_:
raise dpkt.NeedData('%d left, %d needed' % (len(v), l_))
buf = buf[n + l_:]
return t, l_, v, buf
# TOC 1.0: http://jamwt.com/Py-TOC/PROTOCOL
# TOC 2.0: http://www.firestuff.org/projects/firetalk/doc/toc2.txt
def testAIM():
testdata = (
b'\x2a\x02\xac\xf3\x00\x81\x00\x03\x00\x0b\x00\x00\xfa\x45\x55\x64\x0e\x75\x73\x72\x6e\x61'
b'\x6d\x65\x72\x65\x6d\x6f\x76\x65\x64\x00\x00\x00\x0a\x00\x01\x00\x02\x12\x90\x00\x44\x00'
b'\x01\x00\x00\x03\x00\x04\x58\x90\x54\x36\x00\x45\x00\x04\x00\x00\x0f\x93\x00\x21\x00\x08'
b'\x00\x85\x00\x7d\x00\x7d\x00\x00\x00\x41\x00\x01\x00\x00\x37\x00\x04\x00\x00\x00\x00\x00'
b'\x0d\x00\x00\x00\x19\x00\x00\x00\x1d\x00\x24\x00\x00\x00\x05\x02\x01\xd2\x04\x72\x00\x01'
b'\x00\x05\x02\x01\xd2\x04\x72\x00\x03\x00\x05\x2b\x00\x00\x2a\xcc\x00\x81\x00\x05\x2b\x00'
b'\x00\x13\xf1'
)
flap = FLAP(testdata)
assert flap.ast == 0x2a
assert flap.type == 0x02
assert flap.seq == 44275
assert flap.len == 129
assert flap.data == (
b'\x00\x03\x00\x0b\x00\x00\xfa\x45\x55\x64\x0e\x75\x73\x72\x6e\x61\x6d\x65\x72\x65\x6d\x6f'
b'\x76\x65\x64\x00\x00\x00\x0a\x00\x01\x00\x02\x12\x90\x00\x44\x00\x01\x00\x00\x03\x00\x04'
b'\x58\x90\x54\x36\x00\x45\x00\x04\x00\x00\x0f\x93\x00\x21\x00\x08\x00\x85\x00\x7d\x00\x7d'
b'\x00\x00\x00\x41\x00\x01\x00\x00\x37\x00\x04\x00\x00\x00\x00\x00\x0d\x00\x00\x00\x19\x00'
b'\x00\x00\x1d\x00\x24\x00\x00\x00\x05\x02\x01\xd2\x04\x72\x00\x01\x00\x05\x02\x01\xd2\x04'
b'\x72\x00\x03\x00\x05\x2b\x00\x00\x2a\xcc\x00\x81\x00\x05\x2b\x00\x00\x13\xf1'
)
snac = SNAC(flap.data)
assert snac.family == 3
assert snac.subtype == 11
assert snac.flags == 0
assert snac.reqid == 0xfa455564
assert snac.data == (
b'\x0e\x75\x73\x72\x6e\x61\x6d\x65\x72\x65\x6d\x6f\x76\x65\x64\x00\x00\x00\x0a\x00\x01\x00'
b'\x02\x12\x90\x00\x44\x00\x01\x00\x00\x03\x00\x04\x58\x90\x54\x36\x00\x45\x00\x04\x00\x00'
b'\x0f\x93\x00\x21\x00\x08\x00\x85\x00\x7d\x00\x7d\x00\x00\x00\x41\x00\x01\x00\x00\x37\x00'
b'\x04\x00\x00\x00\x00\x00\x0d\x00\x00\x00\x19\x00\x00\x00\x1d\x00\x24\x00\x00\x00\x05\x02'
b'\x01\xd2\x04\x72\x00\x01\x00\x05\x02\x01\xd2\x04\x72\x00\x03\x00\x05\x2b\x00\x00\x2a\xcc'
b'\x00\x81\x00\x05\x2b\x00\x00\x13\xf1'
)
# skip over the buddyname and TLV count in Oncoming Buddy message
tlvdata = snac.data[19:]
tlvCount = 0
while tlvdata:
t, l_, v, tlvdata = tlv(tlvdata)
tlvCount += 1
if tlvCount == 1:
# just check function return for first TLV
assert t == 0x01
assert l_ == 2
assert v == b'\x12\x90'
assert tlvdata == (
b'\x00\x44\x00\x01\x00\x00\x03\x00\x04\x58\x90\x54\x36\x00\x45\x00\x04\x00\x00\x0f'
b'\x93\x00\x21\x00\x08\x00\x85\x00\x7d\x00\x7d\x00\x00\x00\x41\x00\x01\x00\x00\x37'
b'\x00\x04\x00\x00\x00\x00\x00\x0d\x00\x00\x00\x19\x00\x00\x00\x1d\x00\x24\x00\x00'
b'\x00\x05\x02\x01\xd2\x04\x72\x00\x01\x00\x05\x02\x01\xd2\x04\x72\x00\x03\x00\x05'
b'\x2b\x00\x00\x2a\xcc\x00\x81\x00\x05\x2b\x00\x00\x13\xf1'
)
# make sure we extracted 10 TLVs
assert tlvCount == 10
def testExceptions():
testdata = b'xxxxxx'
try:
FLAP(testdata)
except dpkt.UnpackError as e:
assert str(e) == 'invalid FLAP header'
testdata = b'*\x02\x12\x34\x00\xff'
try:
FLAP(testdata)
except dpkt.NeedData as e:
assert str(e) == '0 left, 255 needed'
try:
t, l_, v, _ = tlv(b'x')
except dpkt.UnpackError as e:
assert str(e) == 'invalid type, length fields'
try:
t, l_, v, _ = tlv(b'\x00\x01\x00\xff')
except dpkt.NeedData as e:
assert str(e) == '0 left, 255 needed'
| 5,353 | 32.4625 | 99 |
py
|
dpkt
|
dpkt-master/dpkt/udp.py
|
# $Id: udp.py 23 2006-11-08 15:45:33Z dugsong $
# -*- coding: utf-8 -*-
"""User Datagram Protocol."""
from __future__ import absolute_import
from . import dpkt
UDP_HDR_LEN = 8
UDP_PORT_MAX = 65535
class UDP(dpkt.Packet):
"""User Datagram Protocol.
User Datagram Protocol (UDP) is one of the core members of the Internet protocol suite.
With UDP, computer applications can send messages, in this case referred to as datagrams,
to other hosts on an Internet Protocol (IP) network. Prior communications are not required
in order to set up communication channels or data paths.
Attributes:
__hdr__: Header fields of UDP.
sport: (int): Source port. (2 bytes)
dport: (int): Destination port. (2 bytes)
ulen: (int): Length. (2 bytes)
sum: (int): Checksum. (2 bytes)
"""
__hdr__ = (
('sport', 'H', 0xdead),
('dport', 'H', 0),
('ulen', 'H', 8),
('sum', 'H', 0)
)
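# A usage sketch (illustrative port numbers): attach a payload and update ulen
# by hand, since it is not recalculated automatically on serialization.
def test_udp_pack():
    u = UDP(sport=5353, dport=5353, data=b'hello')
    u.ulen = UDP_HDR_LEN + len(u.data)
    buf = bytes(u)
    assert len(buf) == 13
    u2 = UDP(buf)
    assert u2.sport == 5353 and u2.dport == 5353
    assert u2.ulen == 13 and u2.data == b'hello'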
| 985 | 28 | 94 |
py
|
dpkt
|
dpkt-master/dpkt/sip.py
|
# $Id: sip.py 48 2008-05-27 17:31:15Z yardley $
# -*- coding: utf-8 -*-
"""Session Initiation Protocol."""
from __future__ import absolute_import
from . import http
class Request(http.Request):
"""SIP request.
TODO: Longer class information....
Attributes:
__hdr__: Header fields of SIP request.
TODO.
"""
__hdr_defaults__ = {
'method': 'INVITE',
'uri': 'sip:[email protected]',
'version': '2.0',
'headers': {'To': '', 'From': '', 'Call-ID': '', 'CSeq': '', 'Contact': ''}
}
__methods = dict.fromkeys((
'ACK', 'BYE', 'CANCEL', 'INFO', 'INVITE', 'MESSAGE', 'NOTIFY',
'OPTIONS', 'PRACK', 'PUBLISH', 'REFER', 'REGISTER', 'SUBSCRIBE',
'UPDATE'
))
__proto = 'SIP'
class Response(http.Response):
"""SIP response.
TODO: Longer class information....
Attributes:
__hdr__: Header fields of SIP response.
TODO.
"""
__hdr_defaults__ = {
'version': '2.0',
'status': '200',
'reason': 'OK',
'headers': {'To': '', 'From': '', 'Call-ID': '', 'CSeq': '', 'Contact': ''}
}
__proto = 'SIP'
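# A usage sketch: the defaults above already form a well-formed start line, so
# serializing a freshly constructed message shows the wire format directly.
def test_sip_defaults():
    req = Request()
    assert req.method == 'INVITE'
    assert str(req).startswith('INVITE sip:[email protected] SIP/2.0\r\n')
    rsp = Response()
    assert rsp.status == '200' and rsp.reason == 'OK'
    assert str(rsp).startswith('SIP/2.0 200 OK\r\n')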
| 1,164 | 22.3 | 83 |
py
|
dpkt
|
dpkt-master/dpkt/sll.py
|
# $Id: sll.py 23 2006-11-08 15:45:33Z dugsong $
# -*- coding: utf-8 -*-
"""Linux libpcap "cooked" capture encapsulation."""
from __future__ import absolute_import
from . import arp
from . import dpkt
from . import ethernet
class SLL(dpkt.Packet):
"""Linux libpcap "cooked" capture encapsulation.
TODO: Longer class information....
Attributes:
__hdr__: Header fields of SLL.
TODO.
"""
__hdr__ = (
('type', 'H', 0), # 0: to us, 1: bcast, 2: mcast, 3: other, 4: from us
('hrd', 'H', arp.ARP_HRD_ETH),
('hlen', 'H', 6), # hardware address length
('hdr', '8s', b''), # first 8 bytes of link-layer header
('ethtype', 'H', ethernet.ETH_TYPE_IP),
)
_typesw = ethernet.Ethernet._typesw
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
try:
self.data = self._typesw[self.ethtype](self.data)
setattr(self, self.data.__class__.__name__.lower(), self.data)
except (KeyError, dpkt.UnpackError):
pass
def test_sll():
slldata = (b'\x00\x00\x00\x01\x00\x06\x00\x0b\xdb\x52\x0e\x08\xf6\x7f\x08\x00\x45\x00\x00\x34'
b'\xcc\x6c\x40\x00\x40\x06\x74\x08\x82\xd9\xfa\x8e\x82\xd9\xfa\x0d')
slltest = SLL(slldata)
assert slltest.type == 0
assert slltest.hrd == 1
assert slltest.hlen == 6
assert slltest.hdr == b'\x00\x0b\xdb\x52\x0e\x08\xf6\x7f'
assert slltest.ethtype == 0x0800
# give invalid ethtype of 0x1234 to make sure error is caught
slldata2 = (b'\x00\x00\x00\x01\x00\x06\x00\x0b\xdb\x52\x0e\x08\xf6\x7f\x12\x34\x45\x00\x00\x34'
b'\xcc\x6c\x40\x00\x40\x06\x74\x08\x82\xd9\xfa\x8e\x82\xd9\xfa\x0d')
slltest = SLL(slldata2)
| 1,741 | 31.867925 | 99 |
py
|
dpkt
|
dpkt-master/dpkt/llc.py
|
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import absolute_import
import struct
from . import dpkt
from . import stp
class LLC(dpkt.Packet):
"""802.2 Logical Link Control (LLC) data communication protocol.
Attributes:
__hdr__ = (
('dsap', 'B', 0xaa), # Destination Service Access Point
('ssap', 'B', 0xaa), # Source Service Access Point
('ctl', 'B', 3) # Control Byte
)
"""
__hdr__ = (
('dsap', 'B', 0xaa), # Destination Service Access Point
('ssap', 'B', 0xaa), # Source Service Access Point
('ctl', 'B', 3) # Control Byte
)
@property
def is_snap(self):
return self.dsap == self.ssap == 0xaa
def unpack(self, buf):
from .ethernet import Ethernet, ETH_TYPE_IP, ETH_TYPE_IPX
dpkt.Packet.unpack(self, buf)
if self.is_snap:
self.oui, self.type = struct.unpack('>IH', b'\x00' + self.data[:5])
self.data = self.data[5:]
try:
self.data = Ethernet.get_type(self.type)(self.data)
setattr(self, self.data.__class__.__name__.lower(), self.data)
except (KeyError, dpkt.UnpackError):
pass
else:
# non-SNAP
if self.dsap == 0x06: # SAP_IP
self.data = self.ip = Ethernet.get_type(ETH_TYPE_IP)(self.data)
elif self.dsap == 0x10 or self.dsap == 0xe0: # SAP_NETWARE{1,2}
self.data = self.ipx = Ethernet.get_type(ETH_TYPE_IPX)(self.data)
elif self.dsap == 0x42: # SAP_STP
self.data = self.stp = stp.STP(self.data)
def pack_hdr(self):
buf = dpkt.Packet.pack_hdr(self)
if self.is_snap: # add SNAP sublayer
oui = getattr(self, 'oui', 0)
_type = getattr(self, 'type', 0)
if not _type and isinstance(self.data, dpkt.Packet):
from .ethernet import Ethernet
try:
_type = Ethernet.get_type_rev(self.data.__class__)
except KeyError:
pass
buf += struct.pack('>IH', oui, _type)[1:]
return buf
def __len__(self): # add 5 bytes of SNAP header if needed
return self.__hdr_len__ + 5 * int(self.is_snap) + len(self.data)
def test_llc():
from . import ip
from . import ethernet
s = (b'\xaa\xaa\x03\x00\x00\x00\x08\x00\x45\x00\x00\x28\x07\x27\x40\x00\x80\x06\x1d'
b'\x39\x8d\xd4\x37\x3d\x3f\xf5\xd1\x69\xc0\x5f\x01\xbb\xb2\xd6\xef\x23\x38\x2b'
b'\x4f\x08\x50\x10\x42\x04\xac\x17\x00\x00')
llc_pkt = LLC(s)
ip_pkt = llc_pkt.data
assert isinstance(ip_pkt, ip.IP)
assert llc_pkt.type == ethernet.ETH_TYPE_IP
assert ip_pkt.dst == b'\x3f\xf5\xd1\x69'
assert str(llc_pkt) == str(s)
assert len(llc_pkt) == len(s)
# construction with SNAP header
llc_pkt = LLC(ssap=0xaa, dsap=0xaa, data=ip.IP(s[8:]))
assert str(llc_pkt) == str(s)
# no SNAP
llc_pkt = LLC(ssap=6, dsap=6, data=ip.IP(s[8:]))
assert isinstance(llc_pkt.data, ip.IP)
assert str(llc_pkt) == str(b'\x06\x06\x03' + s[8:])
def test_unpack_sap_ip():
from binascii import unhexlify
from . import ip
buf_llc = unhexlify(
'06' # dsap (SAP_IP)
'aa' # ssap
'03' # ctl
)
buf_ip = unhexlify(
'45' # _v_hl
'00' # tos
'0014' # len
'0000' # id
'0000' # off
'80' # ttl
'06' # p
'd47e' # sum
'11111111' # src
'22222222' # dst
)
buf = buf_llc + buf_ip
llc = LLC(buf)
assert isinstance(llc.data, ip.IP)
def test_unpack_exception_handling():
from binascii import unhexlify
buf_llc = unhexlify(
'aa' # dsap (SAP_IP)
'aa' # ssap
'03' # ctl
'111111' # oui
'2222' # type (not valid ethertype)
)
llc = LLC(buf_llc)
assert not isinstance(llc.data, dpkt.Packet)
def test_pack_hdr_invalid_class():
from binascii import unhexlify
class InvalidClass(dpkt.Packet):
__hdr__ = (('test', 'B', 0x22),)
llc = LLC(dsap=0xaa, ssap=0xaa, ctl=3, oui=0x111111, data=InvalidClass())
correct = unhexlify(
'aa' # dsap
'aa' # ssap
'03' # ctl
'111111' # oui
'0000' # type
'22' # data in test class header
)
assert bytes(llc) == correct
| 4,541 | 27.746835 | 88 |
py
|
dpkt
|
dpkt-master/dpkt/esp.py
|
# $Id: esp.py 23 2006-11-08 15:45:33Z dugsong $
# -*- coding: utf-8 -*-
"""Encapsulated Security Protocol."""
from __future__ import absolute_import
from . import dpkt
class ESP(dpkt.Packet):
"""Encapsulated Security Protocol.
Encapsulating Security Payload (ESP) is a member of the Internet Protocol Security (IPsec) set of protocols that
encrypt and authenticate the packets of data between computers using a Virtual Private Network (VPN). The focus
and layer on which ESP operates makes it possible for VPNs to function securely.
Attributes:
__hdr__: Header fields of ESP.
spi: (int): Security Parameters Index. An arbitrary value that, in combination with the destination
IP address and security protocol (ESP), uniquely identifies the SA for this datagram. (4 bytes)
        seq: (int): Sequence number. This field contains a monotonically increasing counter value. (4 bytes)
"""
__hdr__ = (
('spi', 'I', 0),
('seq', 'I', 0)
)
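# A usage sketch: parse an ESP header followed by (illustrative) opaque payload
# bytes; everything after the 8-byte header stays in .data untouched.
def test_esp_unpack():
    buf = b'\x00\x00\x00\x01\x00\x00\x00\x02\xde\xad\xbe\xef'
    esp = ESP(buf)
    assert esp.spi == 1
    assert esp.seq == 2
    assert esp.data == b'\xde\xad\xbe\xef'
    assert bytes(esp) == buf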
| 1,026 | 37.037037 | 116 |
py
|
dpkt
|
dpkt-master/dpkt/dpkt.py
|
# $Id: dpkt.py 43 2007-08-02 22:42:59Z jon.oberheide $
# -*- coding: utf-8 -*-
"""Simple packet creation and parsing.
The dpkt project is a python module for fast, simple packet parsing, with definitions for the basic TCP/IP protocols.
"""
from __future__ import absolute_import, print_function
import copy
import struct
from functools import partial
from itertools import chain
from .compat import compat_ord, compat_izip, iteritems, ntole
class Error(Exception):
pass
class UnpackError(Error):
pass
class NeedData(UnpackError):
pass
class PackError(Error):
pass
# See the "creating parsers" documentation for how all of this works
class _MetaPacket(type):
def __new__(cls, clsname, clsbases, clsdict):
t = type.__new__(cls, clsname, clsbases, clsdict)
byte_order = getattr(t, '__byte_order__', '>')
st = getattr(t, '__hdr__', None)
if st is not None:
# XXX - __slots__ only created in __new__()
clsdict['__slots__'] = [x[0] for x in st] + ['data']
t = type.__new__(cls, clsname, clsbases, clsdict)
t.__hdr_fields__ = [x[0] for x in st]
t.__hdr_fmt__ = byte_order + ''.join(x[1] for x in st)
t.__hdr_len__ = struct.calcsize(t.__hdr_fmt__)
t.__hdr_defaults__ = dict(compat_izip(
t.__hdr_fields__, (x[2] for x in st)))
# process __bit_fields__
bit_fields = getattr(t, '__bit_fields__', None)
if bit_fields:
t.__bit_fields_defaults__ = {} # bit fields equivalent of __hdr_defaults__
for (ph_name, ph_struct, ph_default) in t.__hdr__: # ph: placeholder variable for the bit field
if ph_name in bit_fields:
field_defs = bit_fields[ph_name]
bits_total = sum(bf[1] for bf in field_defs) # total size in bits
bits_used = 0
# make sure the sum of bits matches the overall size of the placeholder field
# prepending ph_struct with byte_order implies standard size for the byte orders `=`, `<`, `>` and `!`
assert bits_total == struct.calcsize('%s%s' % (byte_order, ph_struct)) * 8, \
"the overall count of bits in [%s] as declared in __bit_fields__ " \
"does not match its struct size in __hdr__" % ph_name
for (bf_name, bf_size) in field_defs:
if bf_name.startswith('_'): # do not create properties for _private fields
bits_used += bf_size
continue
shift = bits_total - bits_used - bf_size
mask = (2**bf_size - 1) << shift # all zeroes except the field bits
mask_inv = (2**bits_total - 1) - mask # inverse mask
bits_used += bf_size
# calculate the default value for the bit field
bf_default = (t.__hdr_defaults__[ph_name] & mask) >> shift
t.__bit_fields_defaults__[bf_name] = bf_default
# create getter, setter and delete properties for the bit fields
def make_getter(ph_name=ph_name, mask=mask, shift=shift):
def getter_func(self):
ph_val = getattr(self, ph_name)
return (ph_val & mask) >> shift
return getter_func
def make_setter(ph_name=ph_name, mask_inv=mask_inv, shift=shift, bf_name=bf_name, max_val=2**bf_size):
def setter_func(self, bf_val):
# ensure the given value fits into the number of bits available
if bf_val >= max_val:
raise ValueError('value %s is too large for field %s' % (bf_val, bf_name))
ph_val = getattr(self, ph_name)
ph_val_new = (bf_val << shift) | (ph_val & mask_inv)
setattr(self, ph_name, ph_val_new)
return setter_func
# delete property to set the bit field back to its default value
def make_delete(bf_name=bf_name, bf_default=bf_default):
def delete_func(self):
setattr(self, bf_name, bf_default)
return delete_func
setattr(t, bf_name, property(make_getter(), make_setter(), make_delete()))
# optional map of functions for pretty printing
# {field_name: callable(field_value) -> str, ..}
# define as needed in the child protocol classes
#t.__pprint_funcs__ = {} - disabled here to keep the base class lightweight
# placeholder for __public_fields__, a class attribute used in __repr__ and pprint()
t.__public_fields__ = None
return t
class Packet(_MetaPacket("Temp", (object,), {})):
r"""Base packet class, with metaclass magic to generate members from self.__hdr__.
Attributes:
__hdr__: Packet header should be defined as a list of
(name, structfmt, default) tuples.
__byte_order__: Byte order, can be set to override the default ('>')
Example:
>>> class Foo(Packet):
... __hdr__ = (('foo', 'I', 1), ('bar', 'H', 2), ('baz', '4s', 'quux'))
...
>>> foo = Foo(bar=3)
>>> foo
Foo(bar=3)
>>> str(foo)
'\x00\x00\x00\x01\x00\x03quux'
>>> foo.bar
3
>>> foo.baz
'quux'
>>> foo.foo = 7
>>> foo.baz = 'whee'
>>> foo
Foo(baz='whee', foo=7, bar=3)
>>> Foo('hello, world!')
Foo(baz=' wor', foo=1751477356L, bar=28460, data='ld!')
"""
def __init__(self, *args, **kwargs):
"""Packet constructor with ([buf], [field=val,...]) prototype.
Arguments:
buf -- optional packet buffer to unpack
Optional keyword arguments correspond to members to set
(matching fields in self.__hdr__, or 'data').
"""
self.data = b''
if args:
try:
self.unpack(args[0])
except struct.error:
if len(args[0]) < self.__hdr_len__:
raise NeedData('got %d, %d needed at least' % (len(args[0]), self.__hdr_len__))
raise UnpackError('invalid %s: %r' %
(self.__class__.__name__, args[0]))
else:
if hasattr(self, '__hdr_fields__'):
for k in self.__hdr_fields__:
setattr(self, k, copy.copy(self.__hdr_defaults__[k]))
for k, v in iteritems(kwargs):
setattr(self, k, v)
if hasattr(self, '__hdr_fmt__'):
self._pack_hdr = partial(struct.pack, self.__hdr_fmt__)
def __len__(self):
return self.__hdr_len__ + len(self.data)
# legacy
def __iter__(self):
return iter((fld, getattr(self, fld)) for fld in self.__class__.__hdr_fields__)
def __getitem__(self, kls):
"""Return the 1st occurrence of the underlying <kls> data layer, raise KeyError otherwise."""
dd = self.data
while isinstance(dd, Packet):
if dd.__class__ == kls:
return dd
dd = dd.data
raise KeyError(kls)
def __contains__(self, kls):
"""Return True is the given <kls> data layer is present in the stack."""
try:
return bool(self.__getitem__(kls))
except KeyError:
return False
def _create_public_fields(self):
"""Construct __public_fields__ to be used inside __repr__ and pprint"""
l_ = []
for field_name, _, _ in getattr(self, '__hdr__', []):
# public fields defined in __hdr__; "public" means not starting with an underscore
if field_name[0] != '_':
l_.append(field_name) # (1)
# if a field name starts with an underscore, and does NOT contain more underscores,
# it is considered hidden and is ignored (good for fields reserved for future use)
# if a field name starts with an underscore, and DOES contain more underscores,
# it is viewed as a complex field where underscores separate the named properties
# of the class;
elif '_' in field_name[1:]:
# (1) search for these properties in __bit_fields__ where they are explicitly defined
if field_name in getattr(self, '__bit_fields__', {}):
for (prop_name, _) in self.__bit_fields__[field_name]:
if isinstance(getattr(self.__class__, prop_name, None), property):
l_.append(prop_name)
# (2) split by underscore into 1- and 2-component names and look for properties with such names;
# Example: _foo_bar_baz -> look for properties named "foo", "bar", "baz", "foo_bar" and "bar_baz"
# (but not "foo_bar_baz" since it contains more than one underscore)
else:
fns = field_name[1:].split('_')
for prop_name in chain(fns, ('_'.join(x) for x in zip(fns, fns[1:]))):
if isinstance(getattr(self.__class__, prop_name, None), property):
l_.append(prop_name)
# check for duplicates, there shouldn't be any
assert len(l_) == len(set(l_))
self.__class__.__public_fields__ = l_ # store it in the class attribute
def __repr__(self):
if self.__public_fields__ is None:
self._create_public_fields()
# Collect and display protocol fields in order:
# 1. public fields defined in __hdr__, unless their value is default
# 2. properties derived from _private fields defined in __hdr__ and __bit_fields__
# 3. dynamically added fields from self.__dict__, unless they are _private
# 4. self.data when it's present
l_ = []
# (1) and (2) are done via __public_fields__; just filter out defaults here
for field_name in self.__public_fields__:
field_value = getattr(self, field_name)
if (hasattr(self, '__hdr_defaults__') and
field_name in self.__hdr_defaults__ and
field_value == self.__hdr_defaults__[field_name]):
continue
if (hasattr(self, '__bit_fields_defaults__') and
field_name in self.__bit_fields_defaults__ and
field_value == self.__bit_fields_defaults__[field_name]):
continue
l_.append('%s=%r' % (field_name, field_value))
# (3)
l_.extend(
['%s=%r' % (attr_name, attr_value)
for attr_name, attr_value in iteritems(self.__dict__)
if attr_name[0] != '_' and # exclude _private attributes
attr_name != self.data.__class__.__name__.lower()]) # exclude fields like ip.udp
# (4)
if self.data:
l_.append('data=%r' % self.data)
return '%s(%s)' % (self.__class__.__name__, ', '.join(l_))
def pprint(self, indent=1):
"""Human friendly pretty-print."""
if self.__public_fields__ is None:
self._create_public_fields()
l_ = []
def add_field(fn, fv):
"""name=value, # pretty-print form (if available)"""
try:
l_.append('%s=%r, # %s' % (fn, fv, self.__pprint_funcs__[fn](fv)))
except (AttributeError, KeyError):
l_.append('%s=%r,' % (fn, fv))
for field_name in self.__public_fields__:
add_field(field_name, getattr(self, field_name))
for attr_name, attr_value in iteritems(self.__dict__):
if (attr_name[0] != '_' and # exclude _private attributes
attr_name != self.data.__class__.__name__.lower()): # exclude fields like ip.udp
if type(attr_value) == list and attr_value: # expand non-empty lists to print one item per line
l_.append('%s=[' % attr_name)
for av1 in attr_value:
l_.append(' ' + repr(av1) + ',') # XXX - TODO: support pretty-print
l_.append('],')
else:
add_field(attr_name, attr_value)
print('%s(' % self.__class__.__name__) # class name, opening brace
for ii in l_:
print(' ' * indent, '%s' % ii)
if self.data:
if isinstance(self.data, Packet): # recursively descend to lower layers
print(' ' * indent, 'data=', end='')
self.data.pprint(indent=indent + 2)
else:
print(' ' * indent, 'data=%r' % self.data)
print(' ' * (indent - 1), end='')
print(') # %s' % self.__class__.__name__) # closing brace # class name
def __str__(self):
return str(self.__bytes__())
def __bytes__(self):
return self.pack_hdr() + bytes(self.data)
def pack_hdr(self):
"""Return packed header string."""
try:
return self._pack_hdr(
*[getattr(self, k) for k in self.__hdr_fields__]
)
except (TypeError, struct.error):
vals = []
for k in self.__hdr_fields__:
v = getattr(self, k)
if isinstance(v, tuple):
vals.extend(v)
else:
vals.append(v)
try:
return struct.pack(self.__hdr_fmt__, *vals)
except struct.error as e:
raise PackError(str(e))
def pack(self):
"""Return packed header + self.data string."""
return bytes(self)
def unpack(self, buf):
"""Unpack packet header fields from buf, and set self.data."""
for k, v in compat_izip(self.__hdr_fields__,
struct.unpack(self.__hdr_fmt__, buf[:self.__hdr_len__])):
setattr(self, k, v)
self.data = buf[self.__hdr_len__:]
# XXX - ''.join([(len(`chr(x)`)==3) and chr(x) or '.' for x in range(256)])
__vis_filter = (
b'................................ !"#$%&\'()*+,-./0123456789:;<=>?'
b'@ABCDEFGHIJKLMNOPQRSTUVWXYZ[.]^_`abcdefghijklmnopqrstuvwxyz{|}~.'
b'................................................................'
b'................................................................')
def hexdump(buf, length=16):
"""Return a hexdump output string of the given buffer."""
n = 0
res = []
while buf:
line, buf = buf[:length], buf[length:]
hexa = ' '.join(['%02x' % compat_ord(x) for x in line])
line = line.translate(__vis_filter).decode('utf-8')
res.append(' %04d: %-*s %s' % (n, length * 3, hexa, line))
n += length
return '\n'.join(res)
def in_cksum_add(s, buf):
n = len(buf)
cnt = (n // 2) * 2
a = struct.unpack('<{}H'.format(n // 2), buf[:cnt]) # unpack as little endian words
res = s + sum(a)
if cnt != n:
res += compat_ord(buf[-1])
return res
def in_cksum_done(s):
s = (s >> 16) + (s & 0xffff)
s += (s >> 16)
return ntole(~s & 0xffff)
def in_cksum(buf):
"""Return computed Internet checksum."""
return in_cksum_done(in_cksum_add(0, buf))
def test_utils():
__buf = b'\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e'
__hd = ' 0000: 00 01 02 03 04 05 06 07 08 09 0a 0b 0c 0d 0e ...............'
h = hexdump(__buf)
assert (h == __hd)
assert in_cksum_add(0, __buf) == 12600 # endianness
c = in_cksum(__buf)
assert (c == 51150)
# test Packet.__getitem__ and __contains__ methods
def test_getitem_contains():
import pytest
class Foo(Packet):
__hdr__ = (('foo', 'I', 0),)
class Bar(Packet):
__hdr__ = (('bar', 'I', 0),)
class Baz(Packet):
__hdr__ = (('baz', 'I', 0),)
class Zeb(Packet):
pass
ff = Foo(foo=1, data=Bar(bar=2, data=Baz(attr=Zeb())))
# __contains__
assert Bar in ff
assert Baz in ff
assert Baz in ff.data
assert Zeb not in ff
assert Zeb not in Baz()
# __getitem__
assert isinstance(ff[Bar], Bar)
assert isinstance(ff[Baz], Baz)
assert isinstance(ff[Bar][Baz], Baz)
with pytest.raises(KeyError):
ff[Baz][Bar]
with pytest.raises(KeyError):
ff[Zeb]
with pytest.raises(KeyError):
Bar()[Baz]
def test_pack_hdr_overflow():
"""Try to fit too much data into struct packing"""
import pytest
class Foo(Packet):
__hdr__ = (
('foo', 'I', 1),
('bar', 'I', (1, 2)),
)
foo = Foo(foo=2**32)
with pytest.raises(PackError):
bytes(foo)
def test_bit_fields_overflow():
"""Try to fit too much data into too few bits"""
import pytest
class Foo(Packet):
__hdr__ = (
('_a_b', 'B', 0),
)
__bit_fields__ = {
'_a_b': (
('a', 2),
('b', 6),
)
}
foo = Foo()
with pytest.raises(ValueError):
foo.a = 5
def test_bit_field_size_is_calculated_using_standard_size():
class Foo(Packet):
__hdr__ = (
("_a_b", "L", 0),
)
__bit_fields__ = {
"_a_b": (
("a", 2*8),
("b", 2*8),
),
}
__byte_order__ = "<"
foo = Foo()
def test_pack_hdr_tuple():
"""Test the unpacking of a tuple for a single format string"""
class Foo(Packet):
__hdr__ = (
('bar', 'II', (1, 2)),
)
foo = Foo()
b = bytes(foo)
assert b == b'\x00\x00\x00\x01\x00\x00\x00\x02'
def test_unpacking_failure():
# during dynamic-sized unpacking in the subclass there may be struct.errors raised,
# but if the header has unpacked correctly, a different error is raised by the superclass
import pytest
class TestPacket(Packet):
__hdr__ = (('test', 'B', 0),)
def unpack(self, buf):
Packet.unpack(self, buf)
self.attribute = struct.unpack('B', buf[1:])
with pytest.raises(UnpackError, match="invalid TestPacket: "):
TestPacket(b'\x00') # header will unpack successfully
def test_repr():
"""complex test for __repr__, __public_fields__"""
class TestPacket(Packet):
__hdr__ = (
('_a_b', 'B', 1), # 'a' and 'b' bit fields
('_rsv', 'B', 0), # hidden reserved field
('_c_flag', 'B', 1), # 'c_flag' property
('d', 'B', 0) # regular field
)
__bit_fields__ = {
'_a_b': (
('a', 4),
('b', 4),
),
}
@property
def c_flag(self):
return (self.a | self.b)
# init with default values
test_packet = TestPacket()
# test repr with all default values so expect no output
# (except for the explicitly defined property, where dpkt doesn't process defaults yet)
assert repr(test_packet) == "TestPacket(c_flag=1)"
# init with non-default values
test_packet = TestPacket(b'\x12\x11\x00\x04')
# ensure the display fields were cached and propagated via class attribute
assert test_packet.__public_fields__ == ['a', 'b', 'c_flag', 'd']
# verify repr
assert repr(test_packet) == "TestPacket(a=1, b=2, c_flag=3, d=4)"
| 19,764 | 34.169039 | 126 |
py
|
dpkt
|
dpkt-master/dpkt/stp.py
|
# $Id: stp.py 23 2006-11-08 15:45:33Z dugsong $
# -*- coding: utf-8 -*-
"""Spanning Tree Protocol."""
from __future__ import print_function
from __future__ import absolute_import
from . import dpkt
class STP(dpkt.Packet):
"""Spanning Tree Protocol.
TODO: Longer class information....
Attributes:
__hdr__: Header fields of STP.
TODO.
"""
__hdr__ = (
('proto_id', 'H', 0),
('v', 'B', 0),
('type', 'B', 0),
('flags', 'B', 0),
('root_id', '8s', b''),
('root_path', 'I', 0),
('bridge_id', '8s', b''),
('port_id', 'H', 0),
('_age', 'H', 0),
('_max_age', 'H', 0),
('_hello', 'H', 0),
('_fd', 'H', 0)
)
@property
def age(self):
return self._age >> 8
@age.setter
def age(self, age):
self._age = age << 8
@property
def max_age(self):
return self._max_age >> 8
@max_age.setter
def max_age(self, max_age):
self._max_age = max_age << 8
@property
def hello(self):
return self._hello >> 8
@hello.setter
def hello(self, hello):
self._hello = hello << 8
@property
def fd(self):
return self._fd >> 8
@fd.setter
def fd(self, fd):
self._fd = fd << 8
def test_stp():
buf = (b'\x00\x00\x02\x02\x3e\x80\x00\x08\x00\x27\xad\xa3\x41\x00\x00\x00\x00\x80\x00\x08\x00\x27'
b'\xad\xa3\x41\x80\x01\x00\x00\x14\x00\x02\x00\x0f\x00\x00\x00\x00\x00\x02\x00\x14\x00')
stp = STP(buf)
assert stp.proto_id == 0
assert stp.port_id == 0x8001
assert stp.age == 0
assert stp.max_age == 20
assert stp.hello == 2
assert stp.fd == 15
assert bytes(stp) == buf
stp.fd = 100
assert stp.pack_hdr()[-2:] == b'\x64\x00' # 100 << 8
def test_properties():
stp = STP()
stp.age = 10
assert stp.age == 10
stp.max_age = 20
assert stp.max_age == 20
stp.hello = 1234
assert stp.hello == 1234
| 2,005 | 19.895833 | 102 |
py
|
dpkt
|
dpkt-master/dpkt/pppoe.py
|
# $Id: pppoe.py 23 2006-11-08 15:45:33Z dugsong $
# -*- coding: utf-8 -*-
"""PPP-over-Ethernet."""
from __future__ import absolute_import
import struct
import codecs
from . import dpkt
from . import ppp
# RFC 2516 codes
PPPoE_PADI = 0x09
PPPoE_PADO = 0x07
PPPoE_PADR = 0x19
PPPoE_PADS = 0x65
PPPoE_PADT = 0xA7
PPPoE_SESSION = 0x00
class PPPoE(dpkt.Packet):
"""PPP-over-Ethernet.
The Point-to-Point Protocol over Ethernet (PPPoE) is a network protocol for encapsulating Point-to-Point Protocol
(PPP) frames inside Ethernet frames. It appeared in 1999, in the context of the boom of DSL as the solution for
tunneling packets over the DSL connection to the ISP's IP network, and from there to the rest of the Internet.
Attributes:
__hdr__: Header fields of PPPoE.
_v_type:
v: (int): Version (4 bits)
type: (int): Type (4 bits)
code: (int): Code. (1 byte)
session: (int): Session ID. (2 bytes)
len: (int): Payload length. (2 bytes)
"""
__hdr__ = (
('_v_type', 'B', 0x11),
('code', 'B', 0),
('session', 'H', 0),
('len', 'H', 0) # payload length
)
__bit_fields__ = {
'_v_type': (
('v', 4),
('type', 4),
)
}
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
try:
if self.code == 0:
# We need to use the pppoe.PPP header here, because PPPoE
# doesn't do the normal encapsulation.
self.data = self.ppp = PPP(self.data)
except dpkt.UnpackError:
pass
class PPP(ppp.PPP):
# Light version for protocols without the usual encapsulation, for PPPoE
__hdr__ = (
        # Usually two bytes, but protocol compression down to one byte is supported even though it is not recommended
('p', 'B', ppp.PPP_IP),
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
if self.p & ppp.PFC_BIT == 0:
try:
self.p = struct.unpack('>H', buf[:2])[0]
except struct.error:
raise dpkt.NeedData
self.data = self.data[1:]
try:
self.data = self._protosw[self.p](self.data)
setattr(self, self.data.__class__.__name__.lower(), self.data)
except (KeyError, struct.error, dpkt.UnpackError):
pass
def pack_hdr(self):
try:
# Protocol compression is *not* recommended (RFC2516), but we do it anyway
if self.p > 0xff:
return struct.pack('>H', self.p)
return dpkt.Packet.pack_hdr(self)
except struct.error as e:
raise dpkt.PackError(str(e))
def test_pppoe_discovery():
s = ("11070000002801010000010300046413"
"85180102000442524153010400103d0f"
"0587062484f2df32b9ddfd77bd5b")
s = codecs.decode(s, 'hex')
p = PPPoE(s)
assert p.code == PPPoE_PADO
assert p.v == 1
assert p.type == 1
s = ("11190000002801010000010300046413"
"85180102000442524153010400103d0f"
"0587062484f2df32b9ddfd77bd5b")
s = codecs.decode(s, 'hex')
p = PPPoE(s)
assert p.code == PPPoE_PADR
assert p.pack_hdr() == s[:6]
def test_pppoe_session():
s = "11000011000cc0210101000a050605fcd459"
s = codecs.decode(s, 'hex')
p = PPPoE(s)
assert p.code == PPPoE_SESSION
assert isinstance(p.ppp, PPP)
assert p.data.p == 0xc021 # LCP
assert len(p.data.data) == 10
assert p.data.pack_hdr() == b"\xc0\x21"
s = ("110000110066005760000000003c3a40fc000000000000000000000000000001"
"fc0000000002010000000000000100018100bf291f9700010102030405060708"
"090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f202122232425262728"
"292a2b2c2d2e2f3031323334")
s = codecs.decode(s, 'hex')
p = PPPoE(s)
assert p.code == PPPoE_SESSION
assert isinstance(p.ppp, PPP)
assert p.data.p == ppp.PPP_IP6
assert p.data.data.p == 58 # ICMPv6
assert p.ppp.pack_hdr() == b"\x57"
def test_ppp_packing():
p = PPP()
assert p.pack_hdr() == b"\x21"
p.p = 0xc021 # LCP
assert p.pack_hdr() == b"\xc0\x21"
def test_ppp_short():
import pytest
pytest.raises(dpkt.NeedData, PPP, b"\x00")
def test_pppoe_properties():
pppoe = PPPoE()
assert pppoe.v == 1
pppoe.v = 7
assert pppoe.v == 7
assert pppoe.type == 1
pppoe.type = 5
assert pppoe.type == 5
def test_pppoe_unpack_error():
from binascii import unhexlify
buf = unhexlify(
"11" # v/type
"00" # code
"0011" # session
"0066" # len
"00" # data
)
# this initialization swallows the UnpackError raised
pppoe = PPPoE(buf)
# unparsed data is still available
assert pppoe.data == b'\x00'
def test_ppp_pack_hdr():
import pytest
from binascii import unhexlify
buf = unhexlify(
'01' # protocol, with compression bit set
'ff' # incomplete data
)
ppp = PPP(buf)
ppp.p = 1234567
with pytest.raises(dpkt.PackError):
ppp.pack_hdr()
# XXX - TODO TLVs, etc.
| 5,158 | 25.187817 | 117 |
py
|
dpkt
|
dpkt-master/dpkt/pim.py
|
# $Id: pim.py 23 2006-11-08 15:45:33Z dugsong $
# -*- coding: utf-8 -*-
"""Protocol Independent Multicast."""
from __future__ import absolute_import
from . import dpkt
class PIM(dpkt.Packet):
"""Protocol Independent Multicast.
Protocol Independent Multicast (PIM) is a collection of multicast routing protocols, each optimized for a different
environment. There are two main PIM protocols, PIM Sparse Mode and PIM Dense Mode. A third PIM protocol,
Bi-directional PIM, is less widely used.
Attributes:
__hdr__: Header fields of PIM.
_v_type: (int): Version (4 bits) and type (4 bits). PIM version number and Message type. (1 byte)
_rsvd: (int): Reserved. Always cleared to zero. (1 byte)
sum: (int): Checksum. The 16-bit one's complement of the one's complement sum of the entire PIM message,
excluding the data portion in the Register message.(2 bytes)
"""
__hdr__ = (
('_v_type', 'B', 0x20),
('_rsvd', 'B', 0),
('sum', 'H', 0)
)
__bit_fields__ = {
'_v_type': (
('v', 4),
('type', 4),
)
}
def __bytes__(self):
if not self.sum:
self.sum = dpkt.in_cksum(dpkt.Packet.__bytes__(self))
return dpkt.Packet.__bytes__(self)
def test_pim():
from binascii import unhexlify
buf = unhexlify(
'20' # _v_type
'00' # rsvd
'df93' # sum
'000100020069' # data
)
pimdata = PIM(buf)
assert bytes(pimdata) == buf
# force checksum recalculation
pimdata = PIM(buf)
pimdata.sum = 0
assert pimdata.sum == 0
assert bytes(pimdata) == buf
assert pimdata.v == 2
assert pimdata.type == 0
# test setters
buf_modified = unhexlify(
'31' # _v_type
'00' # rsvd
'df93' # sum
'000100020069' # data
)
pimdata.v = 3
pimdata.type = 1
assert bytes(pimdata) == buf_modified
| 2,041 | 26.972603 | 119 |
py
|
dpkt
|
dpkt-master/dpkt/edp.py
|
"""Extreme Discovery Protocol."""
from __future__ import absolute_import
import dpkt
class EDP(dpkt.Packet):
__hdr__ = (
('version', 'B', 1),
('reserved', 'B', 0),
('hlen', 'H', 0),
('sum', 'H', 0),
('seq', 'H', 0),
('mid', 'H', 0),
('mac', '6s', b'')
)
def __bytes__(self):
if not self.sum:
self.sum = dpkt.in_cksum(dpkt.Packet.__bytes__(self))
return dpkt.Packet.__bytes__(self)
class TestEDP(object):
"""
Test basic EDP functionality.
"""
@classmethod
def setup_class(cls):
from binascii import unhexlify
cls.buf = unhexlify(
'01' # version
'00' # reserved
'013c' # hlen
'9e76' # sum
'001b' # seq
'0000' # mid
'080027' # mac
'2d90ed990200240000000000000000000000000f020207000000000000000000000000000000009901010445584f532d32000000000000000'
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
'00000000000000000000000000000000099000004'
)
cls.p = EDP(cls.buf)
def test_version(self):
assert (self.p.version == 1)
def test_reserved(self):
assert (self.p.reserved == 0)
def test_hlen(self):
assert (self.p.hlen == 316)
def test_sum(self):
assert (self.p.sum == 40566)
def test_seq(self):
assert (self.p.seq == 27)
def test_mid(self):
assert (self.p.mid == 0)
def test_mac(self):
assert (self.p.mac == b"\x08\x00'-\x90\xed")
def test_bytes(self):
assert bytes(self.p) == self.buf
# force recalculation of the checksum
edp = EDP(self.buf)
edp.sum = 0
assert edp.sum == 0
assert bytes(edp) == self.buf
| 2,291 | 28.384615 | 127 |
py
|
dpkt
|
dpkt-master/dpkt/sll2.py
|
# -*- coding: utf-8 -*-
"""Linux libpcap "cooked v2" capture encapsulation."""
from __future__ import absolute_import
from . import arp
from . import dpkt
from . import ethernet
class SLL2(dpkt.Packet):
"""Linux libpcap "cooked v2" capture encapsulation.
See https://www.tcpdump.org/linktypes/LINKTYPE_LINUX_SLL2.html
Attributes:
__hdr__: Header fields of SLLv2.
"""
__hdr__ = (
('ethtype', 'H', ethernet.ETH_TYPE_IP),
('mbz', 'H', 0), # reserved
('intindex', 'i', 0), # the 1-based index of the interface on which the packet was observed
('hrd', 'H', arp.ARP_HRD_ETH),
('type', 'B', 0), # 0: to us, 1: bcast, 2: mcast, 3: other, 4: from us
('hlen', 'B', 6), # hardware address length
('hdr', '8s', b''), # first 8 bytes of link-layer header
)
_typesw = ethernet.Ethernet._typesw
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
try:
self.data = self._typesw[self.ethtype](self.data)
setattr(self, self.data.__class__.__name__.lower(), self.data)
except (KeyError, dpkt.UnpackError):
pass
def test_sll2():
sll2data = (b'\x08\x00\x00\x00\x00\x00\x00\x03\x00\x01\x00\x06\x00\x0b\xdb\x52\x0e\x08\xf6\x7f'
b'\x45\x00\x00\x34\xcc\x6c\x40\x00\x40\x06\x74\x08\x82\xd9\xfa\x8e\x82\xd9\xfa\x0d')
sll2test = SLL2(sll2data)
assert sll2test.type == 0
assert sll2test.mbz == 0
assert sll2test.intindex == 3
assert sll2test.hrd == 1
assert sll2test.hlen == 6
assert sll2test.hdr == b'\x00\x0b\xdb\x52\x0e\x08\xf6\x7f'
assert sll2test.ethtype == 0x0800
# give invalid ethtype of 0x1234 to make sure error is handled
sll2data2 = (b'\x12\x34\x00\x00\x00\x00\x00\x03\x00\x01\x00\x06\x00\x0b\xdb\x52\x0e\x08\xf6\x7f'
b'\x45\x00\x00\x34\xcc\x6c\x40\x00\x40\x06\x74\x08\x82\xd9\xfa\x8e\x82\xd9\xfa\x0d')
sll2test2 = SLL2(sll2data2)
| 1,966 | 34.763636 | 100 |
py
|
dpkt
|
dpkt-master/dpkt/aoeata.py
|
# -*- coding: utf-8 -*-
"""ATA over Ethernet ATA command"""
from __future__ import print_function
from __future__ import absolute_import
from . import dpkt
ATA_DEVICE_IDENTIFY = 0xec
class AOEATA(dpkt.Packet):
"""ATA over Ethernet ATA command.
See more about the AOEATA on
https://en.wikipedia.org/wiki/ATA_over_Ethernet
Attributes:
__hdr__: Header fields of AOEATA.
data: Message data.
"""
__hdr__ = (
('aflags', 'B', 0),
('errfeat', 'B', 0),
('scnt', 'B', 0),
('cmdstat', 'B', ATA_DEVICE_IDENTIFY),
('lba0', 'B', 0),
('lba1', 'B', 0),
('lba2', 'B', 0),
('lba3', 'B', 0),
('lba4', 'B', 0),
('lba5', 'B', 0),
('res', 'H', 0),
)
# XXX: in unpack, switch on ATA command like icmp does on type
def test_aoeata():
s = (b'\x03\x0a\x6b\x19\x00\x00\x00\x00\x45\x00\x00\x28\x94\x1f\x00\x00\xe3\x06\x99\xb4\x23\x2b'
b'\x24\x00\xde\x8e\x84\x42\xab\xd1\x00\x50\x00\x35\xe1\x29\x20\xd9\x00\x00\x00\x22\x9b\xf0\xe2\x04\x65\x6b')
aoeata = AOEATA(s)
assert (bytes(aoeata) == s)
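# A hedged sketch (assumption: not part of dpkt) of the dispatch suggested by the
# XXX note above: subclass AOEATA and switch on cmdstat during unpack, the way
# icmp.ICMP switches on its type field. The Identify placeholder and the _cmdsw
# mapping are illustrative names only; unknown commands keep the raw payload.
class _AOEATAWithDispatch(AOEATA):
    class Identify(dpkt.Packet):
        """Placeholder decoder that keeps the raw IDENTIFY DEVICE payload."""
        __hdr__ = ()
    _cmdsw = {ATA_DEVICE_IDENTIFY: Identify}
    def unpack(self, buf):
        dpkt.Packet.unpack(self, buf)
        try:
            self.data = self._cmdsw[self.cmdstat](self.data)
        except (KeyError, dpkt.UnpackError):
            pass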
| 1,130 | 24.704545 | 117 |
py
|
dpkt
|
dpkt-master/dpkt/ipx.py
|
# $Id: ipx.py 23 2006-11-08 15:45:33Z dugsong $
# -*- coding: utf-8 -*-
"""Internetwork Packet Exchange."""
from __future__ import absolute_import
from . import dpkt
IPX_HDR_LEN = 30
class IPX(dpkt.Packet):
"""Internetwork Packet Exchange.
Internetwork Packet Exchange (IPX) is the network layer protocol in the IPX/SPX protocol suite.
IPX is derived from Xerox Network Systems' IDP. It also has the ability to act as a transport layer protocol.
Attributes:
__hdr__: Header fields of IPX.
sum: (int): Checksum (2 bytes).
len: (int): Packet Length (including the IPX header / 2 bytes).
tc: (int): Transport Control (hop count / 1 byte).
pt: (int): Packet Type (1 byte).
dst: (bytes): Destination address (12 bytes).
src: (bytes): Source address (12 bytes).
"""
__hdr__ = (
('sum', 'H', 0xffff),
('len', 'H', IPX_HDR_LEN),
('tc', 'B', 0),
('pt', 'B', 0),
('dst', '12s', b''),
('src', '12s', b'')
)
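def test_ipx():
    # A hedged round-trip example (assumption: not part of the original module,
    # which ships without tests). The address values below are arbitrary 12-byte
    # network/node/socket tuples chosen for illustration.
    dst = b'\x00\x00\x00\x02' + b'\xff\xff\xff\xff\xff\xff' + b'\x04\x55'
    src = b'\x00\x00\x00\x01' + b'\x00\x0b\xdb\x52\x0e\x08' + b'\x04\x55'
    ipx = IPX(tc=0, pt=4, dst=dst, src=src)
    buf = bytes(ipx)
    assert len(buf) == IPX_HDR_LEN
    parsed = IPX(buf)
    assert parsed.sum == 0xffff  # default checksum value
    assert parsed.len == IPX_HDR_LEN
    assert parsed.dst == dst
    assert parsed.src == src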
| 1,059 | 29.285714 | 113 |
py
|
dpkt
|
dpkt-master/dpkt/telnet.py
|
# $Id: telnet.py 23 2006-11-08 15:45:33Z dugsong $
# -*- coding: utf-8 -*-
"""Telnet."""
from __future__ import print_function
from __future__ import absolute_import
import struct
from .compat import compat_ord
IAC = 255 # interpret as command:
DONT = 254 # you are not to use option
DO = 253 # please, you use option
WONT = 252 # I won't use option
WILL = 251 # I will use option
SB = 250 # interpret as subnegotiation
GA = 249 # you may reverse the line
EL = 248 # erase the current line
EC = 247 # erase the current character
AYT = 246 # are you there
AO = 245 # abort output--but let prog finish
IP = 244 # interrupt process--permanently
BREAK = 243 # break
DM = 242 # data mark--for connect. cleaning
NOP = 241 # nop
SE = 240 # end sub negotiation
EOR = 239 # end of record (transparent mode)
ABORT = 238 # Abort process
SUSP = 237 # Suspend process
xEOF = 236 # End of file: EOF is already used...
SYNCH = 242 # for telfunc calls
def strip_options(buf):
"""Return a list of lines and dict of options from telnet data."""
l_ = buf.split(struct.pack("B", IAC))
# print l_
b = []
d = {}
subopt = False
for w in l_:
if not w:
continue
o = compat_ord(w[0])
if o > SB:
# print 'WILL/WONT/DO/DONT/IAC', `w`
w = w[2:]
elif o == SE:
# print 'SE', `w`
w = w[1:]
subopt = False
elif o == SB:
# print 'SB', `w`
subopt = True
for opt in (b'USER', b'DISPLAY', b'TERM'):
p = w.find(opt + b'\x01')
if p != -1:
d[opt] = w[p + len(opt) + 1:].split(b'\x00', 1)[0]
w = None
elif subopt:
w = None
if w:
w = w.replace(b'\x00', b'\n').splitlines()
if not w[-1]:
w.pop()
b.extend(w)
return b, d
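def test_strip_options_minimal():
    # A hedged, minimal illustration (assumption: not part of the original test
    # suite): IAC WILL ECHO (0xff 0xfb 0x01) is stripped from the stream and the
    # remaining NUL-terminated line comes back as data, with no suboptions seen.
    buf = b'\xff\xfb\x01' + b'login: user\x00'
    lines, opts = strip_options(buf)
    assert lines == [b'login: user']
    assert opts == {}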
def test_telnet():
l_ = []
s = (b'\xff\xfb\x25\xff\xfa\x25\x00\x00\x00\xff\xf0\xff\xfd\x26\xff\xfa\x26\x05\xff\xf0\xff\xfa'
b'\x26\x01\x01\x02\xff\xf0\xff\xfb\x18\xff\xfb\x20\xff\xfb\x23\xff\xfb\x27\xff\xfc\x24\xff'
b'\xfa\x20\x00\x33\x38\x34\x30\x30\x2c\x33\x38\x34\x30\x30\xff\xf0\xff\xfa\x23\x00\x64\x6f'
b'\x75\x67\x68\x62\x6f\x79\x2e\x63\x69\x74\x69\x2e\x75\x6d\x69\x63\x68\x2e\x65\x64\x75\x3a'
b'\x30\x2e\x30\xff\xf0\xff\xfa\x27\x00\x00\x44\x49\x53\x50\x4c\x41\x59\x01\x64\x6f\x75\x67'
b'\x68\x62\x6f\x79\x2e\x63\x69\x74\x69\x2e\x75\x6d\x69\x63\x68\x2e\x65\x64\x75\x3a\x30\x2e'
b'\x30\x00\x55\x53\x45\x52\x01\x64\x75\x67\x73\x6f\x6e\x67\xff\xf0\xff\xfa\x18\x00\x58\x54'
b'\x45\x52\x4d\xff\xf0\xff\xfd\x03\xff\xfc\x01\xff\xfb\x1f\xff\xfa\x1f\x00\x50\x00\x28\xff'
b'\xf0\xff\xfd\x05\xff\xfb\x21\xff\xfd\x01\x66\x75\x67\x6c\x79\x0d\x00\x79\x6f\x64\x61\x0d'
b'\x00\x62\x61\x73\x68\x74\x61\x72\x64\x0d\x00')
l_.append(s)
s = (b'\xff\xfd\x01\xff\xfd\x03\xff\xfb\x18\xff\xfb\x1f\xff\xfa\x1f\x00\x58\x00\x32\xff\xf0\x61'
b'\x64\x6d\x69\x6e\x0d\x00\xff\xfa\x18\x00\x4c\x49\x4e\x55\x58\xff\xf0\x66\x6f\x6f\x62\x61'
b'\x72\x0d\x00\x65\x6e\x61\x62\x6c\x65\x0d\x00\x66\x6f\x6f\x62\x61\x72\x0d\x00\x0d\x00\x73'
b'\x68\x6f\x77\x20\x69\x70\x20\x69\x6e\x74\x20\x56\x6c\x61\x6e\x20\x36\x36\x36\x0d\x00')
l_.append(s)
s = (b'\xff\xfb\x25\xff\xfa\x25\x00\x00\x00\xff\xf0\xff\xfd\x26\xff\xfa\x26\x05\xff\xf0\xff\xfa'
b'\x26\x01\x01\x02\xff\xf0\xff\xfb\x26\xff\xfb\x18\xff\xfb\x20\xff\xfb\x23\xff\xfb\x27\xff'
b'\xfc\x24\xff\xfa\x20\x00\x33\x38\x34\x30\x30\x2c\x33\x38\x34\x30\x30\xff\xf0\xff\xfa\x23'
b'\x00\x64\x6f\x75\x67\x68\x62\x6f\x79\x2e\x63\x69\x74\x69\x2e\x75\x6d\x69\x63\x68\x2e\x65'
b'\x64\x75\x3a\x30\x2e\x30\xff\xf0\xff\xfa\x27\x00\x00\x44\x49\x53\x50\x4c\x41\x59\x01\x64'
b'\x6f\x75\x67\x68\x62\x6f\x79\x2e\x63\x69\x74\x69\x2e\x75\x6d\x69\x63\x68\x2e\x65\x64\x75'
b'\x3a\x30\x2e\x30\x00\x55\x53\x45\x52\x01\x64\x75\x67\x73\x6f\x6e\x67\xff\xf0\xff\xfa\x18'
b'\x00\x58\x54\x45\x52\x4d\xff\xf0\xff\xfd\x03\xff\xfc\x01\xff\xfb\x22\xff\xfa\x22\x03\x01'
b'\x03\x00\x03\x62\x03\x04\x02\x0f\x05\x00\xff\xff\x07\x62\x1c\x08\x02\x04\x09\x42\x1a\x0a'
b'\x02\x7f\x0b\x02\x15\x0c\x02\x17\x0d\x02\x12\x0e\x02\x16\x0f\x02\x11\x10\x02\x13\x11\x00'
b'\xff\xff\x12\x00\xff\xff\xff\xf0\xff\xfb\x1f\xff\xfa\x1f\x00\x50\x00\x28\xff\xf0\xff\xfd'
b'\x05\xff\xfb\x21\xff\xfa\x22\x01\x0f\xff\xf0\xff\xfd\x01\xff\xfe\x01\xff\xfa\x22\x03\x01'
b'\x80\x00\xff\xf0\xff\xfd\x01\x77\x65\x72\x64\x0d\x0a\xff\xfe\x01\x79\x6f\x64\x61\x0d\x0a'
b'\xff\xfd\x01\x64\x61\x72\x74\x68\x76\x61\x64\x65\x72\x0d\x0a\xff\xfe\x01')
l_.append(s)
exp = [([b'fugly', b'yoda', b'bashtard'], {b'USER': b'dugsong', b'DISPLAY': b'doughboy.citi.umich.edu:0.0'}),
([b'admin', b'foobar', b'enable', b'foobar', b'', b'show ip int Vlan 666'], {}),
([b'werd', b'yoda', b'darthvader'], {b'USER': b'dugsong', b'DISPLAY': b'doughboy.citi.umich.edu:0.0'})]
assert (list(map(strip_options, l_)) == exp)
def test_trailing_null():
from binascii import unhexlify
buf = unhexlify(
'0100020000'
)
b, d = strip_options(buf)
assert b == [b'\x01', b'\x02']
assert d == {}
| 5,344 | 44.29661 | 114 |
py
|
dpkt
|
dpkt-master/dpkt/bgp.py
|
# $Id: bgp.py 76 2011-01-06 15:51:30Z dugsong $
# -*- coding: utf-8 -*-
"""Border Gateway Protocol."""
from __future__ import print_function
from __future__ import absolute_import
import struct
import socket
from . import dpkt
from .compat import compat_ord
# Border Gateway Protocol 4 - RFC 4271
# Communities Attribute - RFC 1997
# Capabilities - RFC 3392
# Route Refresh - RFC 2918
# Route Reflection - RFC 4456
# Confederations - RFC 3065
# Cease Subcodes - RFC 4486
# NOPEER Community - RFC 3765
# Multiprotocol Extensions - RFC 2858
# Advertisement of Multiple Paths in BGP - RFC 7911
# BGP Support for Four-Octet Autonomous System (AS) Number Space - RFC 6793
# Message Types
OPEN = 1
UPDATE = 2
NOTIFICATION = 3
KEEPALIVE = 4
ROUTE_REFRESH = 5
# Attribute Types
ORIGIN = 1
AS_PATH = 2
NEXT_HOP = 3
MULTI_EXIT_DISC = 4
LOCAL_PREF = 5
ATOMIC_AGGREGATE = 6
AGGREGATOR = 7
COMMUNITIES = 8
ORIGINATOR_ID = 9
CLUSTER_LIST = 10
MP_REACH_NLRI = 14
MP_UNREACH_NLRI = 15
# Origin Types
ORIGIN_IGP = 0
ORIGIN_EGP = 1
INCOMPLETE = 2
# AS Path Types
AS_SET = 1
AS_SEQUENCE = 2
AS_CONFED_SEQUENCE = 3
AS_CONFED_SET = 4
# Reserved Communities Types
NO_EXPORT = 0xffffff01
NO_ADVERTISE = 0xffffff02
NO_EXPORT_SUBCONFED = 0xffffff03
NO_PEER = 0xffffff04
# Common AFI types
AFI_IPV4 = 1
AFI_IPV6 = 2
AFI_L2VPN = 25
# Multiprotocol SAFI types
SAFI_UNICAST = 1
SAFI_MULTICAST = 2
SAFI_UNICAST_MULTICAST = 3
SAFI_EVPN = 70
# OPEN Message Optional Parameters
AUTHENTICATION = 1
CAPABILITY = 2
# Capability Types
CAP_MULTIPROTOCOL = 1
CAP_ROUTE_REFRESH = 2
# NOTIFICATION Error Codes
MESSAGE_HEADER_ERROR = 1
OPEN_MESSAGE_ERROR = 2
UPDATE_MESSAGE_ERROR = 3
HOLD_TIMER_EXPIRED = 4
FSM_ERROR = 5
CEASE = 6
# Message Header Error Subcodes
CONNECTION_NOT_SYNCHRONIZED = 1
BAD_MESSAGE_LENGTH = 2
BAD_MESSAGE_TYPE = 3
# OPEN Message Error Subcodes
UNSUPPORTED_VERSION_NUMBER = 1
BAD_PEER_AS = 2
BAD_BGP_IDENTIFIER = 3
UNSUPPORTED_OPTIONAL_PARAMETER = 4
AUTHENTICATION_FAILURE = 5
UNACCEPTABLE_HOLD_TIME = 6
UNSUPPORTED_CAPABILITY = 7
# UPDATE Message Error Subcodes
MALFORMED_ATTRIBUTE_LIST = 1
UNRECOGNIZED_ATTRIBUTE = 2
MISSING_ATTRIBUTE = 3
ATTRIBUTE_FLAGS_ERROR = 4
ATTRIBUTE_LENGTH_ERROR = 5
INVALID_ORIGIN_ATTRIBUTE = 6
AS_ROUTING_LOOP = 7
INVALID_NEXT_HOP_ATTRIBUTE = 8
OPTIONAL_ATTRIBUTE_ERROR = 9
INVALID_NETWORK_FIELD = 10
MALFORMED_AS_PATH = 11
# Cease Error Subcodes
MAX_NUMBER_OF_PREFIXES_REACHED = 1
ADMINISTRATIVE_SHUTDOWN = 2
PEER_DECONFIGURED = 3
ADMINISTRATIVE_RESET = 4
CONNECTION_REJECTED = 5
OTHER_CONFIGURATION_CHANGE = 6
CONNECTION_COLLISION_RESOLUTION = 7
OUT_OF_RESOURCES = 8
class BGP(dpkt.Packet):
"""Border Gateway Protocol.
BGP is an inter-AS routing protocol.
See more about BGP at
https://en.wikipedia.org/wiki/Border_Gateway_Protocol
Attributes:
__hdr__: Header fields of BGP.
marker: (bytes): Marker. Included for compatibility, must be set to all ones. (16 bytes)
len: (int): Length: Total length of the message in octets, including the header. (2 bytes)
type: (int): Type: Type of BGP message. (1 byte)
"""
__hdr__ = (
('marker', '16s', b'\xff' * 16), # marker must be bytes so the header packs under Python 3
('len', 'H', 0),
('type', 'B', OPEN)
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.data = self.data[:self.len - self.__hdr_len__]
if self.type == OPEN:
self.data = self.open = self.Open(self.data)
elif self.type == UPDATE:
self.data = self.update = self.Update(self.data)
elif self.type == NOTIFICATION:
self.data = self.notification = self.Notification(self.data)
elif self.type == KEEPALIVE:
self.data = self.keepalive = self.Keepalive(self.data)
elif self.type == ROUTE_REFRESH:
self.data = self.route_refresh = self.RouteRefresh(self.data)
class Open(dpkt.Packet):
__hdr__ = (
('v', 'B', 4),
('asn', 'H', 0),
('holdtime', 'H', 0),
('identifier', 'I', 0),
('param_len', 'B', 0)
)
__hdr_defaults__ = {
'parameters': []
}
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
l_ = []
plen = self.param_len
while plen > 0:
param = self.Parameter(self.data)
self.data = self.data[len(param):]
plen -= len(param)
l_.append(param)
self.data = self.parameters = l_
def __len__(self):
return self.__hdr_len__ + sum(map(len, self.parameters))
def __bytes__(self):
params = b''.join(map(bytes, self.parameters))
self.param_len = len(params)
return self.pack_hdr() + params
class Parameter(dpkt.Packet):
__hdr__ = (
('type', 'B', 0),
('len', 'B', 0)
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.data = self.data[:self.len]
if self.type == AUTHENTICATION:
self.data = self.authentication = self.Authentication(self.data)
elif self.type == CAPABILITY:
self.data = self.capability = self.Capability(self.data)
class Authentication(dpkt.Packet):
__hdr__ = (
('code', 'B', 0),
)
class Capability(dpkt.Packet):
__hdr__ = (
('code', 'B', 0),
('len', 'B', 0)
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.data = self.data[:self.len]
class Update(dpkt.Packet):
__hdr_defaults__ = {
'withdrawn': [],
'attributes': [],
'announced': []
}
def unpack(self, buf):
self.data = buf
# Withdrawn Routes
wlen = struct.unpack('>H', self.data[:2])[0]
self.data = self.data[2:]
l_ = []
while wlen > 0:
route = RouteIPV4(self.data)
self.data = self.data[len(route):]
wlen -= len(route)
l_.append(route)
self.withdrawn = l_
# Path Attributes
plen = struct.unpack('>H', self.data[:2])[0]
self.data = self.data[2:]
l_ = []
while plen > 0:
attr = self.Attribute(self.data)
self.data = self.data[len(attr):]
plen -= len(attr)
l_.append(attr)
self.attributes = l_
# Announced Routes
l_ = []
while self.data:
# crude heuristic: a length that is a multiple of 9 suggests ADD-PATH encoding
# (4-byte path id + 1-byte prefix length + up to 4 prefix bytes per route)
if len(self.data) % 9 == 0:
route = ExtendedRouteIPV4(self.data)
else:
route = RouteIPV4(self.data)
self.data = self.data[len(route):]
l_.append(route)
self.announced = l_
def __len__(self):
return 2 + sum(map(len, self.withdrawn)) + \
2 + sum(map(len, self.attributes)) + \
sum(map(len, self.announced))
def __bytes__(self):
return struct.pack('>H', sum(map(len, self.withdrawn))) + \
b''.join(map(bytes, self.withdrawn)) + \
struct.pack('>H', sum(map(len, self.attributes))) + \
b''.join(map(bytes, self.attributes)) + \
b''.join(map(bytes, self.announced))
class Attribute(dpkt.Packet):
__hdr__ = (
('flags', 'B', 0),
('type', 'B', 0)
)
@property
def optional(self):
return (self.flags >> 7) & 0x1
@optional.setter
def optional(self, o):
self.flags = (self.flags & ~0x80) | ((o & 0x1) << 7)
@property
def transitive(self):
return (self.flags >> 6) & 0x1
@transitive.setter
def transitive(self, t):
self.flags = (self.flags & ~0x40) | ((t & 0x1) << 6)
@property
def partial(self):
return (self.flags >> 5) & 0x1
@partial.setter
def partial(self, p):
self.flags = (self.flags & ~0x20) | ((p & 0x1) << 5)
@property
def extended_length(self):
return (self.flags >> 4) & 0x1
@extended_length.setter
def extended_length(self, e):
self.flags = (self.flags & ~0x10) | ((e & 0x1) << 4)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
if self.extended_length:
self.len = struct.unpack('>H', self.data[:2])[0]
self.data = self.data[2:]
else:
self.len = struct.unpack('B', self.data[:1])[0]
self.data = self.data[1:]
self.data = self.data[:self.len]
if self.type == ORIGIN:
self.data = self.origin = self.Origin(self.data)
elif self.type == AS_PATH:
self.data = self.as_path = self.ASPath(self.data)
elif self.type == NEXT_HOP:
self.data = self.next_hop = self.NextHop(self.data)
elif self.type == MULTI_EXIT_DISC:
self.data = self.multi_exit_disc = self.MultiExitDisc(self.data)
elif self.type == LOCAL_PREF:
self.data = self.local_pref = self.LocalPref(self.data)
elif self.type == ATOMIC_AGGREGATE:
self.data = self.atomic_aggregate = self.AtomicAggregate(self.data)
elif self.type == AGGREGATOR:
self.data = self.aggregator = self.Aggregator(self.data)
elif self.type == COMMUNITIES:
self.data = self.communities = self.Communities(self.data)
elif self.type == ORIGINATOR_ID:
self.data = self.originator_id = self.OriginatorID(self.data)
elif self.type == CLUSTER_LIST:
self.data = self.cluster_list = self.ClusterList(self.data)
elif self.type == MP_REACH_NLRI:
self.data = self.mp_reach_nlri = self.MPReachNLRI(self.data)
elif self.type == MP_UNREACH_NLRI:
self.data = self.mp_unreach_nlri = self.MPUnreachNLRI(self.data)
def __len__(self):
if self.extended_length:
attr_len = 2
else:
attr_len = 1
return self.__hdr_len__ + attr_len + len(self.data)
def __bytes__(self):
if self.extended_length:
attr_len_str = struct.pack('>H', self.len)
else:
attr_len_str = struct.pack('B', self.len)
return self.pack_hdr() + attr_len_str + bytes(self.data)
class Origin(dpkt.Packet):
__hdr__ = (
('type', 'B', ORIGIN_IGP),
)
class ASPath(dpkt.Packet):
__hdr_defaults__ = {
'segments': []
}
def unpack(self, buf):
self.data = buf
l_ = []
# crude heuristic: a 6-byte AS_PATH is a single segment holding one
# four-octet ASN (2-byte segment header + 4-byte AS number)
as4 = len(self.data) == 6
while self.data:
if as4:
seg = self.ASPathSegment4(self.data)
else:
seg = self.ASPathSegment(self.data)
self.data = self.data[len(seg):]
l_.append(seg)
self.data = self.segments = l_
def __len__(self):
return sum(map(len, self.data))
def __bytes__(self):
return b''.join(map(bytes, self.data))
class ASPathSegment(dpkt.Packet):
__hdr__ = (
('type', 'B', 0),
('len', 'B', 0)
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
l_ = []
for i in range(self.len):
AS = struct.unpack('>H', self.data[:2])[0]
self.data = self.data[2:]
l_.append(AS)
self.data = self.path = l_
def __len__(self):
return self.__hdr_len__ + 2 * len(self.path)
def __bytes__(self):
as_str = b''
for AS in self.path:
as_str += struct.pack('>H', AS)
return self.pack_hdr() + as_str
class ASPathSegment4(dpkt.Packet):
__hdr__ = (
('type', 'B', 0),
('len', 'B', 0)
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
l_ = []
for i in range(self.len):
if len(self.data) >= 4:
AS = struct.unpack('>I', self.data[:4])[0]
self.data = self.data[4:]
l_.append(AS)
self.path = l_
def __len__(self):
return self.__hdr_len__ + 4 * len(self.path)
def __bytes__(self):
as_str = b''
for AS in self.path:
as_str += struct.pack('>I', AS)
return self.pack_hdr() + as_str
class NextHop(dpkt.Packet):
__hdr__ = (
('ip', 'I', 0),
)
class MultiExitDisc(dpkt.Packet):
__hdr__ = (
('value', 'I', 0),
)
class LocalPref(dpkt.Packet):
__hdr__ = (
('value', 'I', 0),
)
class AtomicAggregate(dpkt.Packet):
def unpack(self, buf):
pass
def __len__(self):
return 0
def __bytes__(self):
return b''
class Aggregator(dpkt.Packet):
__hdr__ = (
('asn', 'H', 0),
('ip', 'I', 0)
)
class Communities(dpkt.Packet):
__hdr_defaults__ = {
'list': []
}
def unpack(self, buf):
self.data = buf
l_ = []
while self.data:
val = struct.unpack('>I', self.data[:4])[0]
if (0x00000000 <= val <= 0x0000ffff) or (0xffff0000 <= val <= 0xffffffff):
comm = self.ReservedCommunity(self.data[:4])
else:
comm = self.Community(self.data[:4])
self.data = self.data[len(comm):]
l_.append(comm)
self.data = self.list = l_
def __len__(self):
return sum(map(len, self.data))
def __bytes__(self):
return b''.join(map(bytes, self.data))
class Community(dpkt.Packet):
__hdr__ = (
('asn', 'H', 0),
('value', 'H', 0)
)
class ReservedCommunity(dpkt.Packet):
__hdr__ = (
('value', 'I', 0),
)
class OriginatorID(dpkt.Packet):
__hdr__ = (
('value', 'I', 0),
)
class ClusterList(dpkt.Packet):
__hdr_defaults__ = {
'list': []
}
def unpack(self, buf):
self.data = buf
l_ = []
while self.data:
id = struct.unpack('>I', self.data[:4])[0]
self.data = self.data[4:]
l_.append(id)
self.data = self.list = l_
def __len__(self):
return 4 * len(self.list)
def __bytes__(self):
cluster_str = b''
for val in self.list:
cluster_str += struct.pack('>I', val)
return cluster_str
class MPReachNLRI(dpkt.Packet):
__hdr__ = (
('afi', 'H', AFI_IPV4),
('safi', 'B', SAFI_UNICAST),
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
# Next Hop
hop_len = 4
if self.afi == AFI_IPV6:
hop_len = 16
l_ = []
nlen = struct.unpack('B', self.data[:1])[0]
self.data = self.data[1:]
# next_hop is kept for backward compatibility
self.next_hop = self.data[:nlen]
while nlen > 0:
hop = self.data[:hop_len]
l_.append(hop)
self.data = self.data[hop_len:]
nlen -= hop_len
self.next_hops = l_
# SNPAs
l_ = []
num_snpas = struct.unpack('B', self.data[:1])[0]
self.data = self.data[1:]
for i in range(num_snpas):
snpa = self.SNPA(self.data)
self.data = self.data[len(snpa):]
l_.append(snpa)
self.snpas = l_
if self.afi == AFI_IPV4:
Route = RouteIPV4
elif self.afi == AFI_IPV6:
Route = RouteIPV6
elif self.afi == AFI_L2VPN:
Route = RouteEVPN
else:
Route = RouteGeneric
# Announced Routes
l_ = []
while self.data:
route = Route(self.data)
self.data = self.data[len(route):]
l_.append(route)
self.data = self.announced = l_
def __len__(self):
return self.__hdr_len__ + \
1 + sum(map(len, self.next_hops)) + \
1 + sum(map(len, self.snpas)) + \
sum(map(len, self.announced))
def __bytes__(self):
return self.pack_hdr() + \
struct.pack('B', sum(map(len, self.next_hops))) + \
b''.join(map(bytes, self.next_hops)) + \
struct.pack('B', len(self.snpas)) + \
b''.join(map(bytes, self.snpas)) + \
b''.join(map(bytes, self.announced))
class SNPA(dpkt.Packet):
__hdr__ = (
('len', 'B', 0),
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.data = self.data[:(self.len + 1) // 2]
class MPUnreachNLRI(dpkt.Packet):
__hdr__ = (
('afi', 'H', AFI_IPV4),
('safi', 'B', SAFI_UNICAST),
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
if self.afi == AFI_IPV4:
Route = RouteIPV4
elif self.afi == AFI_IPV6:
Route = RouteIPV6
elif self.afi == AFI_L2VPN:
Route = RouteEVPN
else:
Route = RouteGeneric
# Withdrawn Routes
l_ = []
while self.data:
route = Route(self.data)
self.data = self.data[len(route):]
l_.append(route)
self.data = self.withdrawn = l_
def __len__(self):
return self.__hdr_len__ + sum(map(len, self.data))
def __bytes__(self):
return self.pack_hdr() + b''.join(map(bytes, self.data))
class Notification(dpkt.Packet):
__hdr__ = (
('code', 'B', 0),
('subcode', 'B', 0),
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.error = self.data
class Keepalive(dpkt.Packet):
def unpack(self, buf):
pass
def __len__(self):
return 0
def __bytes__(self):
return b''
class RouteRefresh(dpkt.Packet):
__hdr__ = (
('afi', 'H', AFI_IPV4),
('rsvd', 'B', 0),
('safi', 'B', SAFI_UNICAST)
)
class RouteGeneric(dpkt.Packet):
__hdr__ = (
('len', 'B', 0),
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.data = self.prefix = self.data[:(self.len + 7) // 8]
class RouteIPV4(dpkt.Packet):
__hdr__ = (
('len', 'B', 0),
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
tmp = self.data[:(self.len + 7) // 8]
tmp += (4 - len(tmp)) * b'\x00'
self.data = self.prefix = tmp
def __repr__(self):
cidr = '%s/%d' % (socket.inet_ntoa(self.prefix), self.len)
return '%s(%s)' % (self.__class__.__name__, cidr)
def __len__(self):
return self.__hdr_len__ + (self.len + 7) // 8
def __bytes__(self):
return self.pack_hdr() + self.prefix[:(self.len + 7) // 8]
class ExtendedRouteIPV4(RouteIPV4):
__hdr__ = (
('path_id', 'I', 0),
('len', 'B', 0),
)
def __repr__(self):
cidr = '%s/%d PathId %d' % (socket.inet_ntoa(self.prefix), self.len, self.path_id)
return '%s(%s)' % (self.__class__.__name__, cidr)
class RouteIPV6(dpkt.Packet):
__hdr__ = (
('len', 'B', 0),
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
tmp = self.data[:(self.len + 7) // 8]
tmp += (16 - len(tmp)) * b'\x00'
self.data = self.prefix = tmp
def __len__(self):
return self.__hdr_len__ + (self.len + 7) // 8
def __bytes__(self):
return self.pack_hdr() + self.prefix[:(self.len + 7) // 8]
class RouteEVPN(dpkt.Packet):
__hdr__ = (
('type', 'B', 0),
('len', 'B', 0)
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.route_data = buf = self.data[:self.len]
self.data = self.data[self.len:]
# Get route distinguisher.
self.rd = buf[:8]
buf = buf[8:]
# Get route information. Not all fields are present on all route types.
if self.type != 0x3:
self.esi = buf[:10]
buf = buf[10:]
if self.type != 0x4:
self.eth_id = buf[:4]
buf = buf[4:]
if self.type == 0x2:
self.mac_address_length = compat_ord(buf[0])
if self.mac_address_length == 48:
self.mac_address = buf[1:7]
buf = buf[7:]
else:
self.mac_address = None
buf = buf[1:]
if self.type != 0x1:
self.ip_address_length = compat_ord(buf[0])
if self.ip_address_length == 128:
self.ip_address = buf[1:17]
buf = buf[17:]
elif self.ip_address_length == 32:
self.ip_address = buf[1:5]
buf = buf[5:]
else:
self.ip_address = None
buf = buf[1:]
if self.type in [0x1, 0x2]:
self.mpls_label_stack = buf[:3]
buf = buf[3:]
if self.len > len(buf):
self.mpls_label_stack += buf[:3]
def __len__(self):
return self.__hdr_len__ + self.len
def __bytes__(self):
return self.pack_hdr() + self.route_data
__bgp1 = b'\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x00\x13\x04'
__bgp2 = (
b'\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x00\x63\x02\x00\x00\x00\x48\x40\x01'
b'\x01\x00\x40\x02\x0a\x01\x02\x01\xf4\x01\xf4\x02\x01\xfe\xbb\x40\x03\x04\xc0\xa8\x00\x0f\x40\x05\x04'
b'\x00\x00\x00\x64\x40\x06\x00\xc0\x07\x06\xfe\xba\xc0\xa8\x00\x0a\xc0\x08\x0c\xfe\xbf\x00\x01\x03\x16'
b'\x00\x04\x01\x54\x00\xfa\x80\x09\x04\xc0\xa8\x00\x0f\x80\x0a\x04\xc0\xa8\x00\xfa\x16\xc0\xa8\x04'
)
__bgp3 = (
b'\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x00\x79\x02\x00\x00\x00\x62\x40\x01'
b'\x01\x00\x40\x02\x00\x40\x05\x04\x00\x00\x00\x64\xc0\x10\x08\x00\x02\x01\x2c\x00\x00\x01\x2c\xc0\x80'
b'\x24\x00\x00\xfd\xe9\x40\x01\x01\x00\x40\x02\x04\x02\x01\x15\xb3\x40\x05\x04\x00\x00\x00\x2c\x80\x09'
b'\x04\x16\x05\x05\x05\x80\x0a\x04\x16\x05\x05\x05\x90\x0e\x00\x1e\x00\x01\x80\x0c\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x0c\x04\x04\x04\x00\x60\x18\x77\x01\x00\x00\x01\xf4\x00\x00\x01\xf4\x85'
)
__bgp4 = (
b'\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x00\x2d\x01\x04\x00\xed\x00\x5a\xc6'
b'\x6e\x83\x7d\x10\x02\x06\x01\x04\x00\x01\x00\x01\x02\x02\x80\x00\x02\x02\x02\x00'
)
# BGP-EVPN type 1-4 packets for testing.
__bgp5 = (
b'\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x00\x60\x02\x00\x00\x00\x49\x40\x01'
b'\x01\x00\x40\x02\x00\x40\x05\x04\x00\x00\x00\x64\xc0\x10\x10\x03\x0c\x00\x00\x00\x00\x00\x08\x00\x02'
b'\x03\xe8\x00\x00\x00\x02\x90\x0e\x00\x24\x00\x19\x46\x04\x01\x01\x01\x02\x00\x01\x19\x00\x01\x01\x01'
b'\x01\x02\x00\x02\x05\x00\x00\x03\xe8\x00\x00\x04\x00\x00\x00\x00\x00\x02\x00\x00\x02'
)
__bgp6 = (
b'\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x00\x6f\x02\x00\x00\x00\x58\x40\x01'
b'\x01\x00\x40\x02\x00\x40\x05\x04\x00\x00\x00\x64\xc0\x10\x10\x03\x0c\x00\x00\x00\x00\x00\x08\x00\x02'
b'\x03\xe8\x00\x00\x00\x02\x90\x0e\x00\x33\x00\x19\x46\x04\x01\x01\x01\x02\x00\x02\x28\x00\x01\x01\x01'
b'\x01\x02\x00\x02\x05\x00\x00\x03\xe8\x00\x00\x04\x00\x00\x00\x00\x00\x02\x30\xcc\xaa\x02\x9c\xd8\x29'
b'\x20\xc0\xb4\x01\x02\x00\x00\x02\x00\x00\x00'
)
__bgp7 = (
b'\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x00\x58\x02\x00\x00\x00\x41\x40\x01'
b'\x01\x00\x40\x02\x00\x40\x05\x04\x00\x00\x00\x64\xc0\x10\x10\x03\x0c\x00\x00\x00\x00\x00\x08\x00\x02'
b'\x03\xe8\x00\x00\x00\x02\x90\x0e\x00\x1c\x00\x19\x46\x04\x01\x01\x01\x02\x00\x03\x11\x00\x01\x01\x01'
b'\x01\x02\x00\x02\x00\x00\x00\x02\x20\xc0\xb4\x01\x02'
)
__bgp8 = (
b'\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x00\x5f\x02\x00\x00\x00\x48\x40\x01'
b'\x01\x00\x40\x02\x00\x40\x05\x04\x00\x00\x00\x64\xc0\x10\x10\x03\x0c\x00\x00\x00\x00\x00\x08\x00\x02'
b'\x03\xe8\x00\x00\x00\x02\x90\x0e\x00\x23\x00\x19\x46\x04\x01\x01\x01\x02\x00\x04\x18\x00\x01\x01\x01'
b'\x01\x02\x00\x02\x05\x00\x00\x03\xe8\x00\x00\x04\x00\x00\x20\xc0\xb4\x01\x02'
)
__bgp9 = (
b'\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x00\x7b\x02\x00\x00\x00\x64\x40\x01'
b'\x01\x00\x40\x02\x00\x40\x05\x04\x00\x00\x00\x64\xc0\x10\x10\x03\x0c\x00\x00\x00\x00\x00\x08\x00\x02'
b'\x03\xe8\x00\x00\x00\x02\x90\x0e\x00\x3f\x00\x19\x46\x04\x01\x01\x01\x02\x00\x02\x34\x00\x01\x01\x01'
b'\x01\x02\x00\x02\x05\x00\x00\x03\xe8\x00\x00\x04\x00\x00\x00\x00\x00\x02\x30\xcc\xaa\x02\x9c\xd8\x29'
b'\x80\xc0\xb4\x01\x02\xc0\xb4\x01\x02\xc0\xb4\x01\x02\xc0\xb4\x01\x02\x00\x00\x02\x00\x00\x00'
)
def test_pack():
assert (__bgp1 == bytes(BGP(__bgp1)))
assert (__bgp2 == bytes(BGP(__bgp2)))
assert (__bgp3 == bytes(BGP(__bgp3)))
assert (__bgp4 == bytes(BGP(__bgp4)))
assert (__bgp5 == bytes(BGP(__bgp5)))
assert (__bgp6 == bytes(BGP(__bgp6)))
assert (__bgp7 == bytes(BGP(__bgp7)))
assert (__bgp8 == bytes(BGP(__bgp8)))
assert (__bgp9 == bytes(BGP(__bgp9)))
def test_unpack():
b1 = BGP(__bgp1)
assert (b1.len == 19)
assert (b1.type == KEEPALIVE)
assert (b1.keepalive is not None)
b2 = BGP(__bgp2)
assert (b2.type == UPDATE)
assert (len(b2.update.withdrawn) == 0)
assert (len(b2.update.announced) == 1)
assert (len(b2.update.attributes) == 9)
a = b2.update.attributes[1]
assert (a.type == AS_PATH)
assert (a.len == 10)
assert (len(a.as_path.segments) == 2)
s = a.as_path.segments[0]
assert (s.type == AS_SET)
assert (s.len == 2)
assert (len(s.path) == 2)
assert (s.path[0] == 500)
a = b2.update.attributes[6]
assert (a.type == COMMUNITIES)
assert (a.len == 12)
assert (len(a.communities.list) == 3)
c = a.communities.list[0]
assert (c.asn == 65215)
assert (c.value == 1)
r = b2.update.announced[0]
assert (r.len == 22)
assert (r.prefix == b'\xc0\xa8\x04\x00')
b3 = BGP(__bgp3)
assert (b3.type == UPDATE)
assert (len(b3.update.withdrawn) == 0)
assert (len(b3.update.announced) == 0)
assert (len(b3.update.attributes) == 6)
a = b3.update.attributes[0]
assert (not a.optional)
assert (a.transitive)
assert (not a.partial)
assert (not a.extended_length)
assert (a.type == ORIGIN)
assert (a.len == 1)
o = a.origin
assert (o.type == ORIGIN_IGP)
a = b3.update.attributes[5]
assert (a.optional)
assert (not a.transitive)
assert (not a.partial)
assert (a.extended_length)
assert (a.type == MP_REACH_NLRI)
assert (a.len == 30)
m = a.mp_reach_nlri
assert (m.afi == AFI_IPV4)
assert (len(m.snpas) == 0)
assert (len(m.announced) == 1)
p = m.announced[0]
assert (p.len == 96)
b4 = BGP(__bgp4)
assert (b4.len == 45)
assert (b4.type == OPEN)
assert (b4.open.asn == 237)
assert (b4.open.param_len == 16)
assert (len(b4.open.parameters) == 3)
p = b4.open.parameters[0]
assert (p.type == CAPABILITY)
assert (p.len == 6)
c = p.capability
assert (c.code == CAP_MULTIPROTOCOL)
assert (c.len == 4)
assert (c.data == b'\x00\x01\x00\x01')
c = b4.open.parameters[2].capability
assert (c.code == CAP_ROUTE_REFRESH)
assert (c.len == 0)
b5 = BGP(__bgp5)
assert (b5.len == 96)
assert (b5.type == UPDATE)
assert (len(b5.update.withdrawn) == 0)
a = b5.update.attributes[-1]
assert (a.type == MP_REACH_NLRI)
assert (a.len == 36)
m = a.mp_reach_nlri
assert (m.afi == AFI_L2VPN)
assert (m.safi == SAFI_EVPN)
r = m.announced[0]
assert (r.type == 1)
assert (r.len == 25)
assert (r.rd == b'\x00\x01\x01\x01\x01\x02\x00\x02')
assert (r.esi == b'\x05\x00\x00\x03\xe8\x00\x00\x04\x00\x00')
assert (r.eth_id == b'\x00\x00\x00\x02')
assert (r.mpls_label_stack == b'\x00\x00\x02')
b6 = BGP(__bgp6)
assert (b6.len == 111)
assert (b6.type == UPDATE)
assert (len(b6.update.withdrawn) == 0)
a = b6.update.attributes[-1]
assert (a.type == MP_REACH_NLRI)
assert (a.len == 51)
m = a.mp_reach_nlri
assert (m.afi == AFI_L2VPN)
assert (m.safi == SAFI_EVPN)
r = m.announced[0]
assert (r.type == 2)
assert (r.len == 40)
assert (r.rd == b'\x00\x01\x01\x01\x01\x02\x00\x02')
assert (r.esi == b'\x05\x00\x00\x03\xe8\x00\x00\x04\x00\x00')
assert (r.eth_id == b'\x00\x00\x00\x02')
assert (r.mac_address_length == 48)
assert (r.mac_address == b'\xcc\xaa\x02\x9c\xd8\x29')
assert (r.ip_address_length == 32)
assert (r.ip_address == b'\xc0\xb4\x01\x02')
assert (r.mpls_label_stack == b'\x00\x00\x02\x00\x00\x00')
b7 = BGP(__bgp7)
assert (b7.len == 88)
assert (b7.type == UPDATE)
assert (len(b7.update.withdrawn) == 0)
a = b7.update.attributes[-1]
assert (a.type == MP_REACH_NLRI)
assert (a.len == 28)
m = a.mp_reach_nlri
assert (m.afi == AFI_L2VPN)
assert (m.safi == SAFI_EVPN)
r = m.announced[0]
assert (r.type == 3)
assert (r.len == 17)
assert (r.rd == b'\x00\x01\x01\x01\x01\x02\x00\x02')
assert (r.eth_id == b'\x00\x00\x00\x02')
assert (r.ip_address_length == 32)
assert (r.ip_address == b'\xc0\xb4\x01\x02')
b8 = BGP(__bgp8)
assert (b8.len == 95)
assert (b8.type == UPDATE)
assert (len(b8.update.withdrawn) == 0)
a = b8.update.attributes[-1]
assert (a.type == MP_REACH_NLRI)
assert (a.len == 35)
m = a.mp_reach_nlri
assert (m.afi == AFI_L2VPN)
assert (m.safi == SAFI_EVPN)
r = m.announced[0]
assert (r.type == 4)
assert (r.len == 24)
assert (r.rd == b'\x00\x01\x01\x01\x01\x02\x00\x02')
assert (r.esi == b'\x05\x00\x00\x03\xe8\x00\x00\x04\x00\x00')
assert (r.ip_address_length == 32)
assert (r.ip_address == b'\xc0\xb4\x01\x02')
b9 = BGP(__bgp9)
assert (b9.len == 123)
assert (b9.type == UPDATE)
assert (len(b9.update.withdrawn) == 0)
a = b9.update.attributes[-1]
assert (a.type == MP_REACH_NLRI)
assert (a.len == 63)
m = a.mp_reach_nlri
assert (m.afi == AFI_L2VPN)
assert (m.safi == SAFI_EVPN)
r = m.announced[0]
assert (r.type == 2)
assert (r.len == 52)
assert (r.rd == b'\x00\x01\x01\x01\x01\x02\x00\x02')
assert (r.esi == b'\x05\x00\x00\x03\xe8\x00\x00\x04\x00\x00')
assert (r.eth_id == b'\x00\x00\x00\x02')
assert (r.mac_address_length == 48)
assert (r.mac_address == b'\xcc\xaa\x02\x9c\xd8\x29')
assert (r.ip_address_length == 128)
assert (r.ip_address == b'\xc0\xb4\x01\x02\xc0\xb4\x01\x02\xc0\xb4\x01\x02\xc0\xb4\x01\x02')
assert (r.mpls_label_stack == b'\x00\x00\x02\x00\x00\x00')
def test_bgp_mp_nlri_20_1_mp_reach_nlri_next_hop():
# test for https://github.com/kbandla/dpkt/issues/485
__bgp = (
b'\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x00\x6c\x02\x00\x00\x00\x55\x40\x01'
b'\x01\x00\x40\x02\x04\x02\x01\xfd\xe9\x80\x04\x04\x00\x00\x00\x00\x80\x0e\x40\x00\x02\x01\x20\x20\x01'
b'\x0d\xb8\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\xfe\x80\x00\x00\x00\x00\x00\x00\xc0\x01\x0b'
b'\xff\xfe\x7e\x00\x00\x00\x40\x20\x01\x0d\xb8\x00\x01\x00\x02\x40\x20\x01\x0d\xb8\x00\x01\x00\x01\x40'
b'\x20\x01\x0d\xb8\x00\x01\x00\x00'
)
assert (__bgp == bytes(BGP(__bgp)))
bgp = BGP(__bgp)
assert (len(bgp.data) == 89)
assert (bgp.type == UPDATE)
assert (len(bgp.update.withdrawn) == 0)
assert (len(bgp.update.announced) == 0)
assert (len(bgp.update.attributes) == 4)
attribute = bgp.update.attributes[0]
assert (attribute.type == ORIGIN)
assert (not attribute.optional)
assert (attribute.transitive)
assert (not attribute.partial)
assert (not attribute.extended_length)
assert (attribute.len == 1)
o = attribute.origin
assert (o.type == ORIGIN_IGP)
attribute = bgp.update.attributes[1]
assert (attribute.type == AS_PATH)
assert (not attribute.optional)
assert (attribute.transitive)
assert (not attribute.partial)
assert (not attribute.extended_length)
assert (attribute.flags == 64)
assert (attribute.len == 4)
assert (len(attribute.as_path.segments) == 1)
segment = attribute.as_path.segments[0]
assert (segment.type == AS_SEQUENCE)
assert (segment.len == 1)
assert (len(segment.path) == 1)
assert (segment.path[0] == 65001)
attribute = bgp.update.attributes[2]
assert (attribute.type == MULTI_EXIT_DISC)
assert (attribute.optional)
assert (not attribute.transitive)
assert (not attribute.partial)
assert (not attribute.extended_length)
assert (attribute.flags == 0x80)
assert (attribute.len == 4)
assert (attribute.multi_exit_disc.value == 0)
attribute = bgp.update.attributes[3]
assert (attribute.type == MP_REACH_NLRI)
assert (attribute.optional)
assert (not attribute.transitive)
assert (not attribute.partial)
assert (not attribute.extended_length)
assert (attribute.flags == 0x80)
assert (attribute.len == 64)
mp_reach_nlri = attribute.mp_reach_nlri
assert (mp_reach_nlri.afi == AFI_IPV6)
assert (mp_reach_nlri.safi == SAFI_UNICAST)
assert (len(mp_reach_nlri.snpas) == 0)
assert (len(mp_reach_nlri.announced) == 3)
prefix = mp_reach_nlri.announced[0]
assert (socket.inet_ntop(socket.AF_INET6, prefix.prefix) == '2001:db8:1:2::')
assert (prefix.len == 64)
prefix = mp_reach_nlri.announced[1]
assert (socket.inet_ntop(socket.AF_INET6, prefix.prefix) == '2001:db8:1:1::')
assert (prefix.len == 64)
prefix = mp_reach_nlri.announced[2]
assert (socket.inet_ntop(socket.AF_INET6, prefix.prefix) == '2001:db8:1::')
assert (prefix.len == 64)
assert (len(mp_reach_nlri.next_hops) == 2)
assert (socket.inet_ntop(socket.AF_INET6, mp_reach_nlri.next_hops[0]) == '2001:db8::1')
assert (socket.inet_ntop(socket.AF_INET6, mp_reach_nlri.next_hops[1]) == 'fe80::c001:bff:fe7e:0')
assert (mp_reach_nlri.next_hop == b''.join(mp_reach_nlri.next_hops))
def test_bgp_add_path_6_1_as_path():
# test for https://github.com/kbandla/dpkt/issues/481
# Error processing BGP data: packet 6 : message 1 of bgp-add-path.cap
# https://packetlife.net/media/captures/bgp-add-path.cap
__bgp = (
b'\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\x00\x59\x02\x00\x00\x00\x30\x40\x01'
b'\x01\x00\x40\x02\x06\x02\x01\x00\x00\xfb\xff\x40\x03\x04\x0a\x00\x0e\x01\x80\x04\x04\x00\x00\x00\x00'
b'\x40\x05\x04\x00\x00\x00\x64\x80\x0a\x04\x0a\x00\x22\x04\x80\x09\x04\x0a\x00\x0f\x01\x00\x00\x00\x01'
b'\x20\x05\x05\x05\x05\x00\x00\x00\x01\x20\xc0\xa8\x01\x05'
)
bgp = BGP(__bgp)
assert (__bgp == bytes(bgp))
assert (len(bgp) == 89)
assert (bgp.type == UPDATE)
assert (len(bgp.update.withdrawn) == 0)
announced = bgp.update.announced
assert (len(announced) == 2)
assert (announced[0].len == 32)
assert (announced[0].path_id == 1)
assert (socket.inet_ntop(socket.AF_INET, bytes(announced[0].prefix)) == '5.5.5.5')
assert (announced[1].len == 32)
assert (announced[1].path_id == 1)
assert (socket.inet_ntop(socket.AF_INET, bytes(announced[1].prefix)) == '192.168.1.5')
assert (len(bgp.update.attributes) == 7)
attribute = bgp.update.attributes[0]
assert (attribute.type == ORIGIN)
assert (not attribute.optional)
assert (attribute.transitive)
assert (not attribute.partial)
assert (not attribute.extended_length)
assert (attribute.flags == 0x40)
assert (attribute.len == 1)
assert (attribute.origin.type == ORIGIN_IGP)
attribute = bgp.update.attributes[1]
assert (attribute.type == AS_PATH)
assert (not attribute.optional)
assert (attribute.transitive)
assert (not attribute.partial)
assert (not attribute.extended_length)
assert (attribute.flags == 0x40)
assert (attribute.len == 6)
assert (len(attribute.as_path.segments) == 1)
segment = attribute.as_path.segments[0]
assert (segment.type == AS_SEQUENCE)
assert (segment.len == 1)
assert (len(segment.path) == 1)
assert (segment.path[0] == 64511)
attribute = bgp.update.attributes[2]
assert (attribute.type == NEXT_HOP)
assert (not attribute.optional)
assert (attribute.transitive)
assert (not attribute.partial)
assert (not attribute.extended_length)
assert (attribute.flags == 0x40)
assert (attribute.len == 4)
assert (socket.inet_ntop(socket.AF_INET, bytes(attribute.next_hop)) == '10.0.14.1')
attribute = bgp.update.attributes[3]
assert (attribute.type == MULTI_EXIT_DISC)
assert (attribute.optional)
assert (not attribute.transitive)
assert (not attribute.partial)
assert (not attribute.extended_length)
assert (attribute.flags == 0x80)
assert (attribute.len == 4)
assert (attribute.multi_exit_disc.value == 0)
attribute = bgp.update.attributes[4]
assert (attribute.type == LOCAL_PREF)
assert (not attribute.optional)
assert (attribute.transitive)
assert (not attribute.partial)
assert (not attribute.extended_length)
assert (attribute.flags == 0x40)
assert (attribute.len == 4)
assert (attribute.local_pref.value == 100)
attribute = bgp.update.attributes[5]
assert (attribute.type == CLUSTER_LIST)
assert (attribute.optional)
assert (not attribute.transitive)
assert (not attribute.partial)
assert (not attribute.extended_length)
assert (attribute.flags == 0x80)
assert (attribute.len == 4)
assert (socket.inet_ntop(socket.AF_INET, bytes(attribute.cluster_list)) == '10.0.34.4')
attribute = bgp.update.attributes[6]
assert (attribute.type == ORIGINATOR_ID)
assert (attribute.optional)
assert (not attribute.transitive)
assert (not attribute.partial)
assert (not attribute.extended_length)
assert (attribute.flags == 0x80)
assert (attribute.len == 4)
assert (socket.inet_ntop(socket.AF_INET, bytes(attribute.originator_id)) == '10.0.15.1')
def test_attribute_accessors():
from binascii import unhexlify
buf = unhexlify(
'00' # flags
'01' # type (ORIGIN)
'01' # length
'00' # Origin type
)
attribute = BGP.Update.Attribute(buf)
assert isinstance(attribute.data, BGP.Update.Attribute.Origin)
for attr in ['optional', 'transitive', 'partial', 'extended_length']:
assert getattr(attribute, attr) == 0
# check we can set..
setattr(attribute, attr, 1)
assert getattr(attribute, attr) == 1
# and also unset
setattr(attribute, attr, 0)
assert getattr(attribute, attr) == 0
def test_snpa():
from binascii import unhexlify
buf = unhexlify(
'04' # len (in semi-octets)
'1234' # data
)
snpa = BGP.Update.Attribute.MPReachNLRI.SNPA(buf)
assert snpa.len == 4 # length of the data in semi-octets
assert len(snpa) == 3 # length of the snpa in bytes (including header)
assert bytes(snpa) == buf
def test_mpreachnlri():
from binascii import unhexlify
buf = unhexlify(
'0000' # afi
'00' # safi
'00' # nlen
'01' # num SNPAs
# SNPA
'04' # len
'1234' # data
)
mp = BGP.Update.Attribute.MPReachNLRI(buf)
assert len(mp.snpas) == 1
assert bytes(mp) == buf
def test_notification():
from binascii import unhexlify
buf_notification = unhexlify(
'11' # code
'22' # subcode
'33' # error
)
notification = BGP.Notification(buf_notification)
assert notification.code == 0x11
assert notification.subcode == 0x22
assert notification.error == b'\x33'
assert bytes(notification) == buf_notification
buf_bgp_hdr = unhexlify(
'11111111111111111111111111111111' # marker
'0016' # len
'03' # type (NOTIFICATION)
)
bgp = BGP(buf_bgp_hdr + buf_notification)
assert hasattr(bgp, 'notification')
assert isinstance(bgp.data, BGP.Notification)
assert bgp.data.code == 0x11
assert bgp.data.subcode == 0x22
assert bgp.data.error == b'\x33'
assert bytes(bgp) == buf_bgp_hdr + buf_notification
def test_keepalive():
keepalive = BGP.Keepalive(b'\x11')
assert len(keepalive) == 0
assert bytes(keepalive) == b''
def test_routegeneric():
from binascii import unhexlify
buf = unhexlify(
'08' # len (bits)
'11' # prefix
)
routegeneric = RouteGeneric(buf)
assert routegeneric.len == 8
assert routegeneric.prefix == b'\x11'
assert bytes(routegeneric) == buf
assert len(routegeneric) == 2
def test_routeipv4():
from binascii import unhexlify
buf = unhexlify(
'08' # len (bits)
'11' # prefix
)
routeipv4 = RouteIPV4(buf)
assert routeipv4.len == 8 # prefix len in bits
assert routeipv4.prefix == b'\x11\x00\x00\x00'
assert repr(routeipv4) == "RouteIPV4(17.0.0.0/8)"
assert bytes(routeipv4) == buf
assert len(routeipv4) == 2 # header + prefix(bytes)
def test_routeipv6():
from binascii import unhexlify
buf = unhexlify(
'08' # len (bits)
'22' # prefix
)
routeipv6 = RouteIPV6(buf)
assert routeipv6.len == 8 # prefix len in bits
assert routeipv6.prefix == b'\x22' + b'\x00' * 15 # prefix padded to 16 bytes for IPv6
assert bytes(routeipv6) == buf
assert len(routeipv6) == 2 # header + prefix(bytes)
def test_extendedrouteipv4():
from binascii import unhexlify
buf = unhexlify(
'00000001' # path_id
'20' # len (bits)
'05050505' # prefix
)
extendedrouteipv4 = ExtendedRouteIPV4(buf)
assert extendedrouteipv4.path_id == 1
assert extendedrouteipv4.len == 32
assert extendedrouteipv4.prefix == unhexlify('05050505')
assert repr(extendedrouteipv4) == "ExtendedRouteIPV4(5.5.5.5/32 PathId 1)"
assert bytes(extendedrouteipv4) == buf
assert len(extendedrouteipv4) == len(buf)
def test_routeevpn():
from binascii import unhexlify
buf = unhexlify(
'02' # type
'1a' # len
# route distinguisher
'1111111111111111'
# esi
'22222222222222222222'
# eth_id
'33333333'
# mac address
'00' # len (bits)
# ip address
'00' # len (bits)
# mpls
'6666' # label stack
)
routeevpn = RouteEVPN(buf)
assert routeevpn.type == 2
assert routeevpn.len == 26
assert routeevpn.esi == unhexlify('22222222222222222222')
assert routeevpn.eth_id == unhexlify('33333333')
assert routeevpn.mac_address_length == 0
assert routeevpn.mac_address is None
assert routeevpn.ip_address_length == 0
assert routeevpn.ip_address is None
assert routeevpn.mpls_label_stack == unhexlify('6666')
assert bytes(routeevpn) == buf
assert len(routeevpn) == len(buf)
def test_route_refresh():
from binascii import unhexlify
buf_route_refresh = unhexlify(
'1111' # afi
'22' # rsvd
'33' # safi
)
route_refresh = BGP.RouteRefresh(buf_route_refresh)
assert route_refresh.afi == 0x1111
assert route_refresh.rsvd == 0x22
assert route_refresh.safi == 0x33
assert bytes(route_refresh) == buf_route_refresh
buf_bgp_hdr = unhexlify(
'11111111111111111111111111111111' # marker
'0017' # len
'05' # type (ROUTE_REFRESH)
)
bgp = BGP(buf_bgp_hdr + buf_route_refresh)
assert hasattr(bgp, 'route_refresh')
assert isinstance(bgp.data, BGP.RouteRefresh)
assert bgp.data.afi == 0x1111
assert bgp.data.rsvd == 0x22
assert bgp.data.safi == 0x33
assert bytes(bgp) == buf_bgp_hdr + buf_route_refresh
def test_mpunreachnlri():
from binascii import unhexlify
buf_routeipv4 = unhexlify(
'08' # len (bits)
'11' # prefix
)
buf_routeipv6 = unhexlify(
'08' # len (bits)
'22' # prefix
)
buf_routeevpn = unhexlify(
'02' # type
'1a' # len
# route distinguisher
'1111111111111111'
# esi
'22222222222222222222'
# eth_id
'33333333'
# mac address
'00' # len (bits)
# ip address
'00' # len (bits)
# mpls
'6666' # label stack
)
buf_routegeneric = unhexlify(
'08' # len (bits)
'33' # prefix
)
afi = struct.Struct('>H')
routes = (
(AFI_IPV4, buf_routeipv4, RouteIPV4),
(AFI_IPV6, buf_routeipv6, RouteIPV6),
(AFI_L2VPN, buf_routeevpn, RouteEVPN),
# this afi does not exist, so we will parse as RouteGeneric
(1234, buf_routegeneric, RouteGeneric),
)
for afi_id, buf, cls in routes:
buf = afi.pack(afi_id) + b'\xcc' + buf
mpu = BGP.Update.Attribute.MPUnreachNLRI(buf)
assert mpu.afi == afi_id
assert mpu.safi == 0xcc
assert len(mpu.data) == 1
route = mpu.data[0]
assert isinstance(route, cls)
assert bytes(mpu) == buf
assert len(mpu) == len(buf)
# test the unpacking of the routes, as an Attribute
attribute_hdr = struct.Struct('BBB')
for afi_id, buf, cls in routes:
buf_mpunreachnlri = afi.pack(afi_id) + b'\xcc' + buf
buf_attribute_hdr = attribute_hdr.pack(0, MP_UNREACH_NLRI, len(buf_mpunreachnlri))
buf = buf_attribute_hdr + buf_mpunreachnlri
attribute = BGP.Update.Attribute(buf)
assert isinstance(attribute.data, BGP.Update.Attribute.MPUnreachNLRI)
routes = attribute.data.data
assert len(routes) == 1
assert isinstance(routes[0], cls)
def test_update_withdrawn():
from binascii import unhexlify
buf_ipv4 = unhexlify(
'08' # len (bits)
'11' # prefix
)
packed_length = struct.Struct('>H').pack
wlen, plen = packed_length(len(buf_ipv4)), packed_length(0)
buf = wlen + buf_ipv4 + plen
update = BGP.Update(buf)
assert len(update.withdrawn) == 1
route = update.withdrawn[0]
assert isinstance(route, RouteIPV4)
assert bytes(update) == buf
def test_parameters():
from binascii import unhexlify
buf = unhexlify(
'44' # v
'1111' # asn
'2222' # holdtime
'33333333' # identifier
'03' # param_len
# Parameter
'01' # type (AUTHENTICATION)
'01' # len
# Authentication
'11' # code
)
bgp_open = BGP.Open(buf)
assert len(bgp_open.parameters) == 1
parameter = bgp_open.parameters[0]
assert isinstance(parameter, BGP.Open.Parameter)
assert isinstance(parameter.data, BGP.Open.Parameter.Authentication)
assert bytes(bgp_open) == buf
assert len(bgp_open) == len(buf)
def test_reservedcommunities():
from binascii import unhexlify
buf = unhexlify(
# ReservedCommunity
'00002222' # value
)
communities = BGP.Update.Attribute.Communities(buf)
assert len(communities.data) == 1
community = communities.data[0]
assert isinstance(community, BGP.Update.Attribute.Communities.ReservedCommunity)
assert len(community) == 4
assert bytes(community) == buf
assert len(communities) == 4
assert bytes(communities) == buf
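def test_keepalive_construction():
    # A hedged construction example (assumption: not part of the original test
    # suite, and it relies on the marker default being bytes as in the header
    # definition above). Building a KEEPALIVE from scratch should reproduce the
    # canonical 19-byte message parsed in test_unpack().
    bgp = BGP(len=19, type=KEEPALIVE)
    assert bytes(bgp) == __bgp1
    assert len(bytes(bgp)) == 19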
| 51,351 | 31.37831 | 111 |
py
|
dpkt
|
dpkt-master/dpkt/qq.py
|
# $Id: qq.py 48 2008-05-27 17:31:15Z yardley $
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from .dpkt import Packet
# header_type
QQ_HEADER_BASIC_FAMILY = 0x02
QQ_HEADER_P2P_FAMILY = 0x00
QQ_HEADER_03_FAMILY = 0x03
QQ_HEADER_04_FAMILY = 0x04
QQ_HEADER_05_FAMILY = 0x05
header_type_str = [
"QQ_HEADER_P2P_FAMILY",
"Unknown Type",
"QQ_HEADER_03_FAMILY",
"QQ_HEADER_04_FAMILY",
"QQ_HEADER_05_FAMILY",
]
# command
QQ_CMD_LOGOUT = 0x0001
QQ_CMD_KEEP_ALIVE = 0x0002
QQ_CMD_MODIFY_INFO = 0x0004
QQ_CMD_SEARCH_USER = 0x0005
QQ_CMD_GET_USER_INFO = 0x0006
QQ_CMD_ADD_FRIEND = 0x0009
QQ_CMD_DELETE_FRIEND = 0x000A
QQ_CMD_ADD_FRIEND_AUTH = 0x000B
QQ_CMD_CHANGE_STATUS = 0x000D
QQ_CMD_ACK_SYS_MSG = 0x0012
QQ_CMD_SEND_IM = 0x0016
QQ_CMD_RECV_IM = 0x0017
QQ_CMD_REMOVE_SELF = 0x001C
QQ_CMD_REQUEST_KEY = 0x001D
QQ_CMD_LOGIN = 0x0022
QQ_CMD_GET_FRIEND_LIST = 0x0026
QQ_CMD_GET_ONLINE_OP = 0x0027
QQ_CMD_SEND_SMS = 0x002D
QQ_CMD_CLUSTER_CMD = 0x0030
QQ_CMD_TEST = 0x0031
QQ_CMD_GROUP_DATA_OP = 0x003C
QQ_CMD_UPLOAD_GROUP_FRIEND = 0x003D
QQ_CMD_FRIEND_DATA_OP = 0x003E
QQ_CMD_DOWNLOAD_GROUP_FRIEND = 0x0058
QQ_CMD_FRIEND_LEVEL_OP = 0x005C
QQ_CMD_PRIVACY_DATA_OP = 0x005E
QQ_CMD_CLUSTER_DATA_OP = 0x005F
QQ_CMD_ADVANCED_SEARCH = 0x0061
QQ_CMD_REQUEST_LOGIN_TOKEN = 0x0062
QQ_CMD_USER_PROPERTY_OP = 0x0065
QQ_CMD_TEMP_SESSION_OP = 0x0066
QQ_CMD_SIGNATURE_OP = 0x0067
QQ_CMD_RECV_MSG_SYS = 0x0080
QQ_CMD_RECV_MSG_FRIEND_CHANGE_STATUS = 0x0081
QQ_CMD_WEATHER_OP = 0x00A6
QQ_CMD_ADD_FRIEND_EX = 0x00A7
QQ_CMD_AUTHORIZE = 0X00A8
QQ_CMD_UNKNOWN = 0xFFFF
QQ_SUB_CMD_SEARCH_ME_BY_QQ_ONLY = 0x03
QQ_SUB_CMD_SHARE_GEOGRAPHY = 0x04
QQ_SUB_CMD_GET_FRIEND_LEVEL = 0x02
QQ_SUB_CMD_GET_CLUSTER_ONLINE_MEMBER = 0x01
QQ_05_CMD_REQUEST_AGENT = 0x0021
QQ_05_CMD_REQUEST_FACE = 0x0022
QQ_05_CMD_TRANSFER = 0x0023
QQ_05_CMD_REQUEST_BEGIN = 0x0026
QQ_CLUSTER_CMD_CREATE_CLUSTER = 0x01
QQ_CLUSTER_CMD_MODIFY_MEMBER = 0x02
QQ_CLUSTER_CMD_MODIFY_CLUSTER_INFO = 0x03
QQ_CLUSTER_CMD_GET_CLUSTER_INFO = 0x04
QQ_CLUSTER_CMD_ACTIVATE_CLUSTER = 0x05
QQ_CLUSTER_CMD_SEARCH_CLUSTER = 0x06
QQ_CLUSTER_CMD_JOIN_CLUSTER = 0x07
QQ_CLUSTER_CMD_JOIN_CLUSTER_AUTH = 0x08
QQ_CLUSTER_CMD_EXIT_CLUSTER = 0x09
QQ_CLUSTER_CMD_SEND_IM = 0x0A
QQ_CLUSTER_CMD_GET_ONLINE_MEMBER = 0x0B
QQ_CLUSTER_CMD_GET_MEMBER_INFO = 0x0C
QQ_CLUSTER_CMD_MODIFY_CARD = 0x0E
QQ_CLUSTER_CMD_GET_CARD_BATCH = 0x0F
QQ_CLUSTER_CMD_GET_CARD = 0x10
QQ_CLUSTER_CMD_COMMIT_ORGANIZATION = 0x11
QQ_CLUSTER_CMD_UPDATE_ORGANIZATION = 0x12
QQ_CLUSTER_CMD_COMMIT_MEMBER_ORGANIZATION = 0x13
QQ_CLUSTER_CMD_GET_VERSION_ID = 0x19
QQ_CLUSTER_CMD_SEND_IM_EX = 0x1A
QQ_CLUSTER_CMD_SET_ROLE = 0x1B
QQ_CLUSTER_CMD_TRANSFER_ROLE = 0x1C
QQ_CLUSTER_CMD_CREATE_TEMP = 0x30
QQ_CLUSTER_CMD_MODIFY_TEMP_MEMBER = 0x31
QQ_CLUSTER_CMD_EXIT_TEMP = 0x32
QQ_CLUSTER_CMD_GET_TEMP_INFO = 0x33
QQ_CLUSTER_CMD_MODIFY_TEMP_INFO = 0x34
QQ_CLUSTER_CMD_SEND_TEMP_IM = 0x35
QQ_CLUSTER_CMD_SUB_CLUSTER_OP = 0x36
QQ_CLUSTER_CMD_ACTIVATE_TEMP = 0x37
QQ_CLUSTER_SUB_CMD_ADD_MEMBER = 0x01
QQ_CLUSTER_SUB_CMD_REMOVE_MEMBER = 0x02
QQ_CLUSTER_SUB_CMD_GET_SUBJECT_LIST = 0x02
QQ_CLUSTER_SUB_CMD_GET_DIALOG_LIST = 0x01
QQ_SUB_CMD_GET_ONLINE_FRIEND = 0x2
QQ_SUB_CMD_GET_ONLINE_SERVICE = 0x3
QQ_SUB_CMD_UPLOAD_GROUP_NAME = 0x2
QQ_SUB_CMD_DOWNLOAD_GROUP_NAME = 0x1
QQ_SUB_CMD_SEND_TEMP_SESSION_IM = 0x01
QQ_SUB_CMD_BATCH_DOWNLOAD_FRIEND_REMARK = 0x0
QQ_SUB_CMD_UPLOAD_FRIEND_REMARK = 0x1
QQ_SUB_CMD_REMOVE_FRIEND_FROM_LIST = 0x2
QQ_SUB_CMD_DOWNLOAD_FRIEND_REMARK = 0x3
QQ_SUB_CMD_MODIFY_SIGNATURE = 0x01
QQ_SUB_CMD_DELETE_SIGNATURE = 0x02
QQ_SUB_CMD_GET_SIGNATURE = 0x03
QQ_SUB_CMD_GET_USER_PROPERTY = 0x01
QQ_SUB_CMD_GET_WEATHER = 0x01
QQ_FILE_CMD_HEART_BEAT = 0x0001
QQ_FILE_CMD_HEART_BEAT_ACK = 0x0002
QQ_FILE_CMD_TRANSFER_FINISHED = 0x0003
QQ_FILE_CMD_FILE_OP = 0x0007
QQ_FILE_CMD_FILE_OP_ACK = 0x0008
QQ_FILE_CMD_SENDER_SAY_HELLO = 0x0031
QQ_FILE_CMD_SENDER_SAY_HELLO_ACK = 0x0032
QQ_FILE_CMD_RECEIVER_SAY_HELLO = 0x0033
QQ_FILE_CMD_RECEIVER_SAY_HELLO_ACK = 0x0034
QQ_FILE_CMD_NOTIFY_IP_ACK = 0x003C
QQ_FILE_CMD_PING = 0x003D
QQ_FILE_CMD_PONG = 0x003E
QQ_FILE_CMD_YES_I_AM_BEHIND_FIREWALL = 0x0040
QQ_FILE_CMD_REQUEST_AGENT = 0x0001
QQ_FILE_CMD_CHECK_IN = 0x0002
QQ_FILE_CMD_FORWARD = 0x0003
QQ_FILE_CMD_FORWARD_FINISHED = 0x0004
QQ_FILE_CMD_IT_IS_TIME = 0x0005
QQ_FILE_CMD_I_AM_READY = 0x0006
command_str = {
0x0001: "QQ_CMD_LOGOUT",
0x0002: "QQ_CMD_KEEP_ALIVE",
0x0004: "QQ_CMD_MODIFY_INFO",
0x0005: "QQ_CMD_SEARCH_USER",
0x0006: "QQ_CMD_GET_USER_INFO",
0x0009: "QQ_CMD_ADD_FRIEND",
0x000A: "QQ_CMD_DELETE_FRIEND",
0x000B: "QQ_CMD_ADD_FRIEND_AUTH",
0x000D: "QQ_CMD_CHANGE_STATUS",
0x0012: "QQ_CMD_ACK_SYS_MSG",
0x0016: "QQ_CMD_SEND_IM",
0x0017: "QQ_CMD_RECV_IM",
0x001C: "QQ_CMD_REMOVE_SELF",
0x001D: "QQ_CMD_REQUEST_KEY",
0x0022: "QQ_CMD_LOGIN",
0x0026: "QQ_CMD_GET_FRIEND_LIST",
0x0027: "QQ_CMD_GET_ONLINE_OP",
0x002D: "QQ_CMD_SEND_SMS",
0x0030: "QQ_CMD_CLUSTER_CMD",
0x0031: "QQ_CMD_TEST",
0x003C: "QQ_CMD_GROUP_DATA_OP",
0x003D: "QQ_CMD_UPLOAD_GROUP_FRIEND",
0x003E: "QQ_CMD_FRIEND_DATA_OP",
0x0058: "QQ_CMD_DOWNLOAD_GROUP_FRIEND",
0x005C: "QQ_CMD_FRIEND_LEVEL_OP",
0x005E: "QQ_CMD_PRIVACY_DATA_OP",
0x005F: "QQ_CMD_CLUSTER_DATA_OP",
0x0061: "QQ_CMD_ADVANCED_SEARCH",
0x0062: "QQ_CMD_REQUEST_LOGIN_TOKEN",
0x0065: "QQ_CMD_USER_PROPERTY_OP",
0x0066: "QQ_CMD_TEMP_SESSION_OP",
0x0067: "QQ_CMD_SIGNATURE_OP",
0x0080: "QQ_CMD_RECV_MSG_SYS",
0x0081: "QQ_CMD_RECV_MSG_FRIEND_CHANGE_STATUS",
0x00A6: "QQ_CMD_WEATHER_OP",
0x00A7: "QQ_CMD_ADD_FRIEND_EX",
0x00A8: "QQ_CMD_AUTHORIZE",
0xFFFF: "QQ_CMD_UNKNOWN",
0x0021: "_CMD_REQUEST_AGENT",
# 0x0022: "_CMD_REQUEST_FACE", # FIXME - dup dict key
0x0023: "_CMD_TRANSFER",
# 0x0026: "_CMD_REQUEST_BEGIN", # FIXME - dup dict key
}
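# A hedged sketch (assumption: not part of the module) of one way to resolve the
# duplicate-key FIXMEs above: keep the QQ_HEADER_05_FAMILY command names in their
# own lookup table instead of overloading command_str.
command_str_05 = {
    0x0021: "QQ_05_CMD_REQUEST_AGENT",
    0x0022: "QQ_05_CMD_REQUEST_FACE",
    0x0023: "QQ_05_CMD_TRANSFER",
    0x0026: "QQ_05_CMD_REQUEST_BEGIN",
}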
class QQBasicPacket(Packet):
__hdr__ = (
('header_type', 'B', 2),
('source', 'H', 0),
('command', 'H', 0),
('sequence', 'H', 0),
('qqNum', 'L', 0),
)
class QQ3Packet(Packet):
__hdr__ = (
('header_type', 'B', 3),
('command', 'B', 0),
('sequence', 'H', 0),
('unknown1', 'L', 0),
('unknown2', 'L', 0),
('unknown3', 'L', 0),
('unknown4', 'L', 0),
('unknown5', 'L', 0),
('unknown6', 'L', 0),
('unknown7', 'L', 0),
('unknown8', 'L', 0),
('unknown9', 'L', 0),
('unknown10', 'B', 1),
('unknown11', 'B', 0),
('unknown12', 'B', 0),
('source', 'H', 0),
('unknown13', 'B', 0),
)
class QQ5Packet(Packet):
__hdr__ = (
('header_type', 'B', 5),
('source', 'H', 0),
('unknown', 'H', 0),
('command', 'H', 0),
('sequence', 'H', 0),
('qqNum', 'L', 0),
)
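def test_qq_basic_packet():
    # A hedged round-trip example (assumption: this module ships without tests,
    # so this is illustrative only). The basic header is 11 bytes:
    # B + H + H + H + L, packed big-endian by dpkt.
    p = QQBasicPacket(header_type=QQ_HEADER_BASIC_FAMILY,
                      command=QQ_CMD_KEEP_ALIVE,
                      sequence=0x1234,
                      qqNum=123456)
    buf = bytes(p)
    assert len(buf) == 11
    parsed = QQBasicPacket(buf)
    assert parsed.command == QQ_CMD_KEEP_ALIVE
    assert parsed.sequence == 0x1234
    assert parsed.qqNum == 123456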
| 6,842 | 29.013158 | 59 |
py
|
dpkt
|
dpkt-master/dpkt/gre.py
|
# $Id: gre.py 75 2010-08-03 14:42:19Z jon.oberheide $
# -*- coding: utf-8 -*-
"""Generic Routing Encapsulation."""
from __future__ import absolute_import
import struct
import codecs
from . import dpkt
from . import ethernet
from .compat import compat_izip
GRE_CP = 0x8000 # Checksum Present
GRE_RP = 0x4000 # Routing Present
GRE_KP = 0x2000 # Key Present
GRE_SP = 0x1000 # Sequence Present
GRE_SS = 0x0800 # Strict Source Route
GRE_AP = 0x0080 # Acknowledgment Present
GRE_opt_fields = (
(GRE_CP | GRE_RP, 'sum', 'H'), (GRE_CP | GRE_RP, 'off', 'H'),
(GRE_KP, 'key', 'I'), (GRE_SP, 'seq', 'I'), (GRE_AP, 'ack', 'I')
)
class GRE(dpkt.Packet):
"""Generic Routing Encapsulation.
Generic Routing Encapsulation, or GRE, is a protocol for encapsulating data packets that use one routing protocol
inside the packets of another protocol. "Encapsulating" means wrapping one data packet within another data packet,
like putting a box inside another box. GRE is one way to set up a direct point-to-point connection across a network,
for the purpose of simplifying connections between separate networks. It works with a variety of network layer
protocols.
Attributes:
__hdr__: Header fields of GRE.
flags: (int): Flag bits. (2 bytes)
p: (int): Protocol Type (2 bytes)
"""
__hdr__ = (
('flags', 'H', 0),
('p', 'H', 0x0800), # ETH_TYPE_IP
)
sre = ()
@property
def v(self):
return self.flags & 0x7
@v.setter
def v(self, v):
self.flags = (self.flags & ~0x7) | (v & 0x7)
@property
def recur(self):
"""Recursion control bits. (3 bits)"""
return (self.flags >> 5) & 0x7
@recur.setter
def recur(self, v):
self.flags = (self.flags & ~0xe0) | ((v & 0x7) << 5)
class SRE(dpkt.Packet):
__hdr__ = [
('family', 'H', 0),
('off', 'B', 0),
('len', 'B', 0)
]
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.data = self.data[:self.len]
def opt_fields_fmts(self):
if self.v == 0:
fields, fmts = [], []
opt_fields = GRE_opt_fields
else:
fields, fmts = ['len', 'callid'], ['H', 'H']
opt_fields = GRE_opt_fields[-2:]
for flags, field, fmt in opt_fields:
if self.flags & flags:
fields.append(field)
fmts.append(fmt)
return fields, fmts
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
fields, fmts = self.opt_fields_fmts()
if fields:
fmt = ''.join(fmts)
fmtlen = struct.calcsize(fmt)
vals = struct.unpack("!" + fmt, self.data[:fmtlen])
self.data = self.data[fmtlen:]
self.__dict__.update(dict(compat_izip(fields, vals)))
if self.flags & GRE_RP:
l_ = []
while True:
sre = self.SRE(self.data)
self.data = self.data[len(sre):]
l_.append(sre)
if not sre.len:
break
self.sre = l_
try:
self.data = ethernet.Ethernet._typesw[self.p](self.data)
setattr(self, self.data.__class__.__name__.lower(), self.data)
except (KeyError, dpkt.UnpackError):
# data already set
pass
def __len__(self):
opt_fmtlen = struct.calcsize(''.join(self.opt_fields_fmts()[1]))
return self.__hdr_len__ + opt_fmtlen + sum(map(len, self.sre)) + len(self.data)
def __bytes__(self):
fields, fmts = self.opt_fields_fmts()
if fields:
vals = []
for f in fields:
vals.append(getattr(self, f))
opt_s = struct.pack('!' + ''.join(fmts), *vals)
else:
opt_s = b''
return self.pack_hdr() + opt_s + b''.join(map(bytes, self.sre)) + bytes(self.data)
def test_gre_v1():
# Runs all the tests associated with this class/file
s = codecs.decode("3081880a0067178000068fb100083a76", 'hex') + b"A" * 103
g = GRE(s)
assert g.v == 1
assert g.p == 0x880a
assert g.seq == 430001
assert g.ack == 539254
assert g.callid == 6016
assert g.len == 103
assert g.data == b"A" * 103
assert len(g) == len(s)
s = codecs.decode("3001880a00b2001100083ab8", 'hex') + b"A" * 178
g = GRE(s)
assert g.v == 1
assert g.p == 0x880a
assert g.seq == 539320
assert g.callid == 17
assert g.len == 178
assert g.data == b"A" * 178
assert len(g) == len(s)
def test_gre_len():
from binascii import unhexlify
gre = GRE()
assert len(gre) == 4
buf = unhexlify("3081880a0067178000068fb100083a76") + b"\x41" * 103
gre = GRE(buf)
assert bytes(gre) == buf
assert len(gre) == len(buf)
def test_gre_accessors():
gre = GRE()
for attr in ['v', 'recur']:
print(attr)
assert hasattr(gre, attr)
assert getattr(gre, attr) == 0
setattr(gre, attr, 1)
assert getattr(gre, attr) == 1
def test_sre_creation():
from binascii import unhexlify
buf = unhexlify(
'0000' # family
'00' # off
'02' # len
'ffff'
)
sre = GRE.SRE(buf)
assert sre.data == b'\xff\xff'
assert len(sre) == 6
assert bytes(sre) == buf
def test_gre_nested_sre():
from binascii import unhexlify
buf = unhexlify(
'4000' # flags (GRE_RP)
'0800' # p (ETH_TYPE_IP)
'0001' # sum
'0002' # off
# SRE entry
'0003' # family
'04' # off
'02' # len
'ffff'
# SRE entry (no len => last element)
'0006' # family
'00' # off
'00' # len
)
gre = GRE(buf)
assert hasattr(gre, 'sre')
assert isinstance(gre.sre, list)
assert len(gre.sre) == 2
assert len(gre) == len(buf)
assert bytes(gre) == buf
assert gre.data == b''
def test_gre_next_layer():
from binascii import unhexlify
from . import ipx
buf = unhexlify(
'0000' # flags (NONE)
'8137' # p (ETH_TYPE_IPX)
# IPX packet
'0000' # sum
'0001' # len
'02' # tc
'03' # pt
'0102030405060708090a0b0c' # dst
'c0b0a0908070605040302010' # src
)
gre = GRE(buf)
assert hasattr(gre, 'ipx')
assert isinstance(gre.data, ipx.IPX)
assert gre.data.tc == 2
assert gre.data.src == unhexlify('c0b0a0908070605040302010')
assert gre.data.dst == unhexlify('0102030405060708090a0b0c')
assert len(gre) == len(buf)
assert bytes(gre) == buf
| 6,763 | 26.274194 | 120 |
py
|
dpkt
|
dpkt-master/dpkt/crc32c.py
|
# $Id: crc32c.py 23 2006-11-08 15:45:33Z dugsong $
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import array
# CRC-32C Checksum for SCTP
# http://tools.ietf.org/html/rfc3309
crc32c_table = (
0x00000000, 0xF26B8303, 0xE13B70F7, 0x1350F3F4, 0xC79A971F,
0x35F1141C, 0x26A1E7E8, 0xD4CA64EB, 0x8AD958CF, 0x78B2DBCC,
0x6BE22838, 0x9989AB3B, 0x4D43CFD0, 0xBF284CD3, 0xAC78BF27,
0x5E133C24, 0x105EC76F, 0xE235446C, 0xF165B798, 0x030E349B,
0xD7C45070, 0x25AFD373, 0x36FF2087, 0xC494A384, 0x9A879FA0,
0x68EC1CA3, 0x7BBCEF57, 0x89D76C54, 0x5D1D08BF, 0xAF768BBC,
0xBC267848, 0x4E4DFB4B, 0x20BD8EDE, 0xD2D60DDD, 0xC186FE29,
0x33ED7D2A, 0xE72719C1, 0x154C9AC2, 0x061C6936, 0xF477EA35,
0xAA64D611, 0x580F5512, 0x4B5FA6E6, 0xB93425E5, 0x6DFE410E,
0x9F95C20D, 0x8CC531F9, 0x7EAEB2FA, 0x30E349B1, 0xC288CAB2,
0xD1D83946, 0x23B3BA45, 0xF779DEAE, 0x05125DAD, 0x1642AE59,
0xE4292D5A, 0xBA3A117E, 0x4851927D, 0x5B016189, 0xA96AE28A,
0x7DA08661, 0x8FCB0562, 0x9C9BF696, 0x6EF07595, 0x417B1DBC,
0xB3109EBF, 0xA0406D4B, 0x522BEE48, 0x86E18AA3, 0x748A09A0,
0x67DAFA54, 0x95B17957, 0xCBA24573, 0x39C9C670, 0x2A993584,
0xD8F2B687, 0x0C38D26C, 0xFE53516F, 0xED03A29B, 0x1F682198,
0x5125DAD3, 0xA34E59D0, 0xB01EAA24, 0x42752927, 0x96BF4DCC,
0x64D4CECF, 0x77843D3B, 0x85EFBE38, 0xDBFC821C, 0x2997011F,
0x3AC7F2EB, 0xC8AC71E8, 0x1C661503, 0xEE0D9600, 0xFD5D65F4,
0x0F36E6F7, 0x61C69362, 0x93AD1061, 0x80FDE395, 0x72966096,
0xA65C047D, 0x5437877E, 0x4767748A, 0xB50CF789, 0xEB1FCBAD,
0x197448AE, 0x0A24BB5A, 0xF84F3859, 0x2C855CB2, 0xDEEEDFB1,
0xCDBE2C45, 0x3FD5AF46, 0x7198540D, 0x83F3D70E, 0x90A324FA,
0x62C8A7F9, 0xB602C312, 0x44694011, 0x5739B3E5, 0xA55230E6,
0xFB410CC2, 0x092A8FC1, 0x1A7A7C35, 0xE811FF36, 0x3CDB9BDD,
0xCEB018DE, 0xDDE0EB2A, 0x2F8B6829, 0x82F63B78, 0x709DB87B,
0x63CD4B8F, 0x91A6C88C, 0x456CAC67, 0xB7072F64, 0xA457DC90,
0x563C5F93, 0x082F63B7, 0xFA44E0B4, 0xE9141340, 0x1B7F9043,
0xCFB5F4A8, 0x3DDE77AB, 0x2E8E845F, 0xDCE5075C, 0x92A8FC17,
0x60C37F14, 0x73938CE0, 0x81F80FE3, 0x55326B08, 0xA759E80B,
0xB4091BFF, 0x466298FC, 0x1871A4D8, 0xEA1A27DB, 0xF94AD42F,
0x0B21572C, 0xDFEB33C7, 0x2D80B0C4, 0x3ED04330, 0xCCBBC033,
0xA24BB5A6, 0x502036A5, 0x4370C551, 0xB11B4652, 0x65D122B9,
0x97BAA1BA, 0x84EA524E, 0x7681D14D, 0x2892ED69, 0xDAF96E6A,
0xC9A99D9E, 0x3BC21E9D, 0xEF087A76, 0x1D63F975, 0x0E330A81,
0xFC588982, 0xB21572C9, 0x407EF1CA, 0x532E023E, 0xA145813D,
0x758FE5D6, 0x87E466D5, 0x94B49521, 0x66DF1622, 0x38CC2A06,
0xCAA7A905, 0xD9F75AF1, 0x2B9CD9F2, 0xFF56BD19, 0x0D3D3E1A,
0x1E6DCDEE, 0xEC064EED, 0xC38D26C4, 0x31E6A5C7, 0x22B65633,
0xD0DDD530, 0x0417B1DB, 0xF67C32D8, 0xE52CC12C, 0x1747422F,
0x49547E0B, 0xBB3FFD08, 0xA86F0EFC, 0x5A048DFF, 0x8ECEE914,
0x7CA56A17, 0x6FF599E3, 0x9D9E1AE0, 0xD3D3E1AB, 0x21B862A8,
0x32E8915C, 0xC083125F, 0x144976B4, 0xE622F5B7, 0xF5720643,
0x07198540, 0x590AB964, 0xAB613A67, 0xB831C993, 0x4A5A4A90,
0x9E902E7B, 0x6CFBAD78, 0x7FAB5E8C, 0x8DC0DD8F, 0xE330A81A,
0x115B2B19, 0x020BD8ED, 0xF0605BEE, 0x24AA3F05, 0xD6C1BC06,
0xC5914FF2, 0x37FACCF1, 0x69E9F0D5, 0x9B8273D6, 0x88D28022,
0x7AB90321, 0xAE7367CA, 0x5C18E4C9, 0x4F48173D, 0xBD23943E,
0xF36E6F75, 0x0105EC76, 0x12551F82, 0xE03E9C81, 0x34F4F86A,
0xC69F7B69, 0xD5CF889D, 0x27A40B9E, 0x79B737BA, 0x8BDCB4B9,
0x988C474D, 0x6AE7C44E, 0xBE2DA0A5, 0x4C4623A6, 0x5F16D052,
0xAD7D5351
)
def add(crc, buf):
"""Feed the bytes in buf into the running CRC-32c value and return it."""
byte_buf = array.array('B', buf)
for b in byte_buf:
crc = (crc >> 8) ^ crc32c_table[(crc ^ b) & 0xff]
return crc
def done(crc):
"""Finalize the CRC-32c: complement it and mirror the byte order (RFC 3309)."""
tmp = ~crc & 0xffffffff
b0 = tmp & 0xff
b1 = (tmp >> 8) & 0xff
b2 = (tmp >> 16) & 0xff
b3 = (tmp >> 24) & 0xff
crc = (b0 << 24) | (b1 << 16) | (b2 << 8) | b3
return crc
def cksum(buf):
"""Return computed CRC-32c checksum."""
return done(add(0xffffffff, buf))
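# Illustrative sketch: cksum() checksums a whole buffer in one shot, while the
# add()/done() pair can be fed data incrementally; the chunking below is an
# arbitrary example.
def _example_crc32c_chunked():
    whole = cksum(b'123456789')
    running = add(0xffffffff, b'12345')
    running = add(running, b'6789')
    assert done(running) == whole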
def test_crc32c():
def bswap32(x):
from struct import pack, unpack
return unpack('<I', pack('>I', x))[0]
# reference test value from CRC catalogue
# http://reveng.sourceforge.net/crc-catalogue/17plus.htm#crc.cat.crc-32c
# SCTP uses transport-level mirrored byte ordering, so we bswap32
assert cksum(b'') == 0
assert cksum(b'123456789') == bswap32(0xe3069283)
| 4,364 | 42.65 | 76 |
py
|
dpkt
|
dpkt-master/dpkt/pmap.py
|
# $Id: pmap.py 23 2006-11-08 15:45:33Z dugsong $
# -*- coding: utf-8 -*-
"""Portmap / rpcbind."""
from __future__ import absolute_import
from . import dpkt
PMAP_PROG = 100000
PMAP_PROCDUMP = 4
PMAP_VERS = 2
class Pmap(dpkt.Packet):
"""Portmap / rpcbind.
The port mapper (rpc.portmap or just portmap, or rpcbind) is an Open Network Computing Remote Procedure
Call (ONC RPC) service that runs on network nodes that provide other ONC RPC services. The port mapper service
always uses TCP or UDP port 111; a fixed port is required for it, as a client would not be able to get the
port number for the port mapper service from the port mapper itself.
Attributes:
__hdr__: Header fields of Pmap.
prog: (int) Program. (4 bytes)
vers: (int) Version. (4 bytes)
prot: (int) Protocol. (4 bytes)
port: (int) Port. (4 bytes)
"""
__hdr__ = (
('prog', 'I', 0),
('vers', 'I', 0),
('prot', 'I', 0),
('port', 'I', 0),
)
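# Illustrative sketch: packing and re-parsing a portmap record; the protocol
# and port values are arbitrary examples (17 = UDP, 111 = portmapper).
def _example_pmap_roundtrip():
    pm = Pmap(prog=PMAP_PROG, vers=PMAP_VERS, prot=17, port=111)
    buf = bytes(pm)
    assert len(buf) == 16  # four 4-byte fields
    assert Pmap(buf).port == 111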
| 1,032 | 28.514286 | 114 |
py
|
dpkt
|
dpkt-master/dpkt/sctp.py
|
# $Id: sctp.py 23 2006-11-08 15:45:33Z dugsong $
# -*- coding: utf-8 -*-
"""Stream Control Transmission Protocol."""
from __future__ import print_function
from __future__ import absolute_import
from . import dpkt
from . import crc32c
# Stream Control Transmission Protocol
# http://tools.ietf.org/html/rfc2960
# Chunk Types
DATA = 0
INIT = 1
INIT_ACK = 2
SACK = 3
HEARTBEAT = 4
HEARTBEAT_ACK = 5
ABORT = 6
SHUTDOWN = 7
SHUTDOWN_ACK = 8
ERROR = 9
COOKIE_ECHO = 10
COOKIE_ACK = 11
ECNE = 12
CWR = 13
SHUTDOWN_COMPLETE = 14
class SCTP(dpkt.Packet):
"""Stream Control Transmission Protocol.
SCTP (RFC 4960) is a message-oriented transport protocol providing reliable,
ordered or unordered delivery of multiple streams within a single association.
Attributes:
__hdr__: Header fields of SCTP.
sport: (int): Source port. (2 bytes)
dport: (int): Destination port. (2 bytes)
vtag: (int): Verification tag. (4 bytes)
sum: (int): CRC-32c checksum. (4 bytes)
"""
__hdr__ = (
('sport', 'H', 0),
('dport', 'H', 0),
('vtag', 'I', 0),
('sum', 'I', 0)
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
l_ = []
while self.data:
chunk = Chunk(self.data)
l_.append(chunk)
if len(chunk) == 0:
self.data = b''
break
self.data = self.data[len(chunk):]
self.chunks = l_
def __len__(self):
return self.__hdr_len__ + sum(len(x) for x in self.chunks)
def __bytes__(self):
l_ = [bytes(x) for x in self.chunks]
if self.sum == 0:
s = crc32c.add(0xffffffff, self.pack_hdr())
for x in l_:
s = crc32c.add(s, x)
self.sum = crc32c.done(s)
return self.pack_hdr() + b''.join(l_)
class Chunk(dpkt.Packet):
__hdr__ = (
('type', 'B', INIT),
('flags', 'B', 0),
('len', 'H', 0)
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.data = self.data[:self.len - self.__hdr_len__]
self.padding = b'' # optional padding for DATA chunks
# SCTP DATA chunks are padded to a 4-byte boundary
if self.type == DATA and self.len % 4:
plen = 4 - self.len % 4 # padded length
if plen:
pos = self.__hdr_len__ + len(self.data) # end of data in buf
self.padding = buf[pos:pos + plen]
def __len__(self):
return self.len + len(self.padding)
def __bytes__(self):
return self.pack_hdr() + bytes(self.data) + self.padding
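# Illustrative sketch: when sum is left as 0, __bytes__ fills in the CRC-32c
# checksum while packing. The ports, vtag and the COOKIE_ACK chunk below are
# arbitrary example values.
def _example_sctp_checksum():
    sctp = SCTP(sport=5000, dport=5001, vtag=1, sum=0)
    sctp.chunks = [Chunk(b'\x0b\x00\x00\x04')]  # COOKIE_ACK, len 4, no payload
    buf = bytes(sctp)
    parsed = SCTP(buf)
    assert parsed.sum == sctp.sum  # checksum was filled in during packing
    assert len(parsed.chunks) == 1 and parsed.chunks[0].type == COOKIE_ACK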
__s = (b'\x80\x44\x00\x50\x00\x00\x00\x00\x30\xba\xef\x54\x01\x00\x00\x3c\x3b\xb9\x9c\x46\x00\x01'
b'\xa0\x00\x00\x0a\xff\xff\x2b\x2d\x7e\xb2\x00\x05\x00\x08\x9b\xe6\x18\x9b\x00\x05\x00\x08'
b'\x9b\xe6\x18\x9c\x00\x0c\x00\x06\x00\x05\x00\x00\x80\x00\x00\x04\xc0\x00\x00\x04\xc0\x06'
b'\x00\x08\x00\x00\x00\x00')
def test_sctp_pack():
sctp = SCTP(__s)
assert (__s == bytes(sctp))
sctp.sum = 0
assert (__s == bytes(sctp))
def test_sctp_unpack():
sctp = SCTP(__s)
assert (sctp.sport == 32836)
assert (sctp.dport == 80)
assert (len(sctp.chunks) == 1)
assert (len(sctp) == 72)
chunk = sctp.chunks[0]
assert (chunk.type == INIT)
assert (chunk.len == 60)
def test_sctp_data_chunk(): # https://github.com/kbandla/dpkt/issues/499
# packet 5 from 'sctp-www.cap' downloaded from https://wiki.wireshark.org/SampleCaptures
# chunk len == 419 so requires padding to a 4-byte boundary
d = (b'\x80\x44\x00\x50\xd2\x6a\xc1\xe5\x70\xe5\x5b\x4c\x00\x03\x01\xa3\x2b\x2d\x7e\xb2\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x47\x45\x54\x20\x2f\x20\x48\x54\x54\x50\x2f\x31\x2e\x31\x0d\x0a'
b'\x48\x6f\x73\x74\x3a\x20\x32\x30\x33\x2e\x32\x35\x35\x2e\x32\x35\x32\x2e\x31\x39\x34\x0d'
b'\x0a\x55\x73\x65\x72\x2d\x41\x67\x65\x6e\x74\x3a\x20\x4d\x6f\x7a\x69\x6c\x6c\x61\x2f\x35'
b'\x2e\x30\x20\x28\x58\x31\x31\x3b\x20\x55\x3b\x20\x4c\x69\x6e\x75\x78\x20\x69\x36\x38\x36'
b'\x3b\x20\x6b\x6f\x2d\x4b\x52\x3b\x20\x72\x76\x3a\x31\x2e\x37\x2e\x31\x32\x29\x20\x47\x65'
b'\x63\x6b\x6f\x2f\x32\x30\x30\x35\x31\x30\x30\x37\x20\x44\x65\x62\x69\x61\x6e\x2f\x31\x2e'
b'\x37\x2e\x31\x32\x2d\x31\x0d\x0a\x41\x63\x63\x65\x70\x74\x3a\x20\x74\x65\x78\x74\x2f\x78'
b'\x6d\x6c\x2c\x61\x70\x70\x6c\x69\x63\x61\x74\x69\x6f\x6e\x2f\x78\x6d\x6c\x2c\x61\x70\x70'
b'\x6c\x69\x63\x61\x74\x69\x6f\x6e\x2f\x78\x68\x74\x6d\x6c\x2b\x78\x6d\x6c\x2c\x74\x65\x78'
b'\x74\x2f\x68\x74\x6d\x6c\x3b\x71\x3d\x30\x2e\x39\x2c\x74\x65\x78\x74\x2f\x70\x6c\x61\x69'
b'\x6e\x3b\x71\x3d\x30\x2e\x38\x2c\x69\x6d\x61\x67\x65\x2f\x70\x6e\x67\x2c\x2a\x2f\x2a\x3b'
b'\x71\x3d\x30\x2e\x35\x0d\x0a\x41\x63\x63\x65\x70\x74\x2d\x4c\x61\x6e\x67\x75\x61\x67\x65'
b'\x3a\x20\x6b\x6f\x2c\x65\x6e\x2d\x75\x73\x3b\x71\x3d\x30\x2e\x37\x2c\x65\x6e\x3b\x71\x3d'
b'\x30\x2e\x33\x0d\x0a\x41\x63\x63\x65\x70\x74\x2d\x45\x6e\x63\x6f\x64\x69\x6e\x67\x3a\x20'
b'\x67\x7a\x69\x70\x2c\x64\x65\x66\x6c\x61\x74\x65\x0d\x0a\x41\x63\x63\x65\x70\x74\x2d\x43'
b'\x68\x61\x72\x73\x65\x74\x3a\x20\x45\x55\x43\x2d\x4b\x52\x2c\x75\x74\x66\x2d\x38\x3b\x71'
b'\x3d\x30\x2e\x37\x2c\x2a\x3b\x71\x3d\x30\x2e\x37\x0d\x0a\x4b\x65\x65\x70\x2d\x41\x6c\x69'
b'\x76\x65\x3a\x20\x33\x30\x30\x0d\x0a\x43\x6f\x6e\x6e\x65\x63\x74\x69\x6f\x6e\x3a\x20\x6b'
b'\x65\x65\x70\x2d\x61\x6c\x69\x76\x65\x0d\x0a\x0d\x0a\x00') # <-- ends with \x00 padding
sctp = SCTP(d)
assert sctp.chunks
assert len(sctp.chunks) == 1
ch = sctp.chunks[0]
assert ch.type == DATA
assert ch.len == 419
assert len(ch) == 420 # 419 +1 byte padding
assert ch.data[-14:] == b'keep-alive\r\n\r\n' # no padding byte at the end
# no remaining sctp data
assert sctp.data == b''
# test packing of the padded chunk
assert bytes(ch) == d[SCTP.__hdr_len__:]
def test_malformed_sctp_data_chunk():
# packet 7964 from '4.pcap' downloaded from https://research.unsw.edu.au/projects/unsw-nb15-dataset
d = (b'\x27\x0f\xe1\xc3\xc2\x73\x4d\x32\x4f\x54\x27\x8c' # header
b'\x0b\x00\x00\x04' # chunk 0, COOKIE_ACK chunk
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00') # chunk 1, malformed DATA chunk, size labeled as 0
sctp = SCTP(d)
assert sctp.chunks
assert len(sctp.chunks) == 2
ch = sctp.chunks[1]
assert ch.type == DATA
assert ch.len == 0
assert len(ch) == 0
assert ch.data == b'\x00\x00'
# no remaining sctp data
assert sctp.data == b''
| 6,399 | 33.594595 | 103 |
py
|
dpkt
|
dpkt-master/dpkt/ppp.py
|
# $Id: ppp.py 65 2010-03-26 02:53:51Z dugsong $
# -*- coding: utf-8 -*-
"""Point-to-Point Protocol."""
from __future__ import absolute_import
import struct
from . import dpkt
# XXX - finish later
# http://www.iana.org/assignments/ppp-numbers
PPP_IP = 0x21 # Internet Protocol
PPP_IP6 = 0x57 # Internet Protocol v6
# Protocol field compression
PFC_BIT = 0x01
class PPP(dpkt.Packet):
"""Point-to-Point Protocol.
Point-to-Point Protocol (PPP) is a data link layer (layer 2) communication protocol between two routers directly
without any host or any other networking in between. It can provide connection authentication, transmission
encryption and data compression.
Note: This class is subclassed in PPPoE
Attributes:
__hdr__: Header fields of PPP.
addr: (int): Address. 0xFF, standard broadcast address. (1 byte)
cntrl: (int): Control. 0x03, unnumbered data. (1 byte)
p: (int): Protocol. PPP ID of embedded data. (1 byte, or 2 bytes when the protocol field is not compressed)
"""
__hdr__ = (
('addr', 'B', 0xff),
('cntrl', 'B', 3),
('p', 'B', PPP_IP),
)
_protosw = {}
@classmethod
def set_p(cls, p, pktclass):
cls._protosw[p] = pktclass
@classmethod
def get_p(cls, p):
return cls._protosw[p]
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
if self.p & PFC_BIT == 0:
try:
self.p = struct.unpack('>H', buf[2:4])[0]
except struct.error:
raise dpkt.NeedData
self.data = self.data[1:]
try:
self.data = self._protosw[self.p](self.data)
setattr(self, self.data.__class__.__name__.lower(), self.data)
except (KeyError, struct.error, dpkt.UnpackError):
pass
def pack_hdr(self):
try:
if self.p > 0xff:
return struct.pack('>BBH', self.addr, self.cntrl, self.p)
return dpkt.Packet.pack_hdr(self)
except struct.error as e:
raise dpkt.PackError(str(e))
def __load_protos():
g = globals()
for k, v in g.items():
if k.startswith('PPP_'):
name = k[4:]
modname = name.lower()
try:
mod = __import__(modname, g, level=1)
PPP.set_p(v, getattr(mod, name))
except (ImportError, AttributeError):
continue
def _mod_init():
"""Post-initialization called when all dpkt modules are fully loaded"""
if not PPP._protosw:
__load_protos()
def test_ppp():
# Test protocol compression
s = b"\xff\x03\x21"
p = PPP(s)
assert p.p == 0x21
s = b"\xff\x03\x00\x21"
p = PPP(s)
assert p.p == 0x21
def test_ppp_short():
s = b"\xff\x03\x00"
import pytest
pytest.raises(dpkt.NeedData, PPP, s)
def test_packing():
p = PPP()
assert p.pack_hdr() == b"\xff\x03\x21"
p.p = 0xc021 # LCP
assert p.pack_hdr() == b"\xff\x03\xc0\x21"
def test_ppp_classmethods():
import pytest
class TestProto(dpkt.Packet):
pass
proto_number = 123
# asserting that this proto is not currently added
with pytest.raises(KeyError):
PPP.get_p(proto_number)
PPP.set_p(proto_number, TestProto)
assert PPP.get_p(proto_number) == TestProto
# we need to reset the class, or impact other tests
del PPP._protosw[proto_number]
def test_unpacking_exceptions():
from dpkt import ip
from binascii import unhexlify
buf_ppp = unhexlify(
'ff' # addr
'03' # cntrl
'21' # p (PPP_IP)
)
buf_ip = unhexlify(
'45' # _v_hl
'00' # tos
'0014' # len
'0000' # id
'0000' # off
'80' # ttl
'06' # p
'd47e' # sum
'11111111' # src
'22222222' # dst
)
buf = buf_ppp + buf_ip
ppp = PPP(buf)
assert hasattr(ppp, 'ip')
assert isinstance(ppp.data, ip.IP)
assert bytes(ppp) == buf
def test_ppp_packing_error():
import pytest
# addr is a 1-byte field, so this will overflow when packing
ppp = PPP(p=257, addr=1234)
with pytest.raises(dpkt.PackError):
ppp.pack_hdr()
def test_proto_loading():
# test that failure to load protocol handlers isn't catastrophic
standard_protos = PPP._protosw
# delete existing protos
PPP._protosw = {}
assert not PPP._protosw
# inject a new global variable to be picked up
globals()['PPP_NON_EXISTENT_PROTO'] = "FAIL"
_mod_init()
# we should get the same answer as if NON_EXISTENT_PROTO didn't exist
assert PPP._protosw == standard_protos
| 4,674 | 23.73545 | 116 |
py
|
dpkt
|
dpkt-master/dpkt/rpc.py
|
# $Id: rpc.py 23 2006-11-08 15:45:33Z dugsong $
# -*- coding: utf-8 -*-
"""Remote Procedure Call."""
from __future__ import absolute_import
import struct
from . import dpkt
# RPC.dir
CALL = 0
REPLY = 1
# RPC.Auth.flavor
AUTH_NONE = AUTH_NULL = 0
AUTH_UNIX = 1
AUTH_SHORT = 2
AUTH_DES = 3
# RPC.Reply.stat
MSG_ACCEPTED = 0
MSG_DENIED = 1
# RPC.Reply.Accept.stat
SUCCESS = 0
PROG_UNAVAIL = 1
PROG_MISMATCH = 2
PROC_UNAVAIL = 3
GARBAGE_ARGS = 4
SYSTEM_ERR = 5
# RPC.Reply.Reject.stat
RPC_MISMATCH = 0
AUTH_ERROR = 1
class RPC(dpkt.Packet):
"""Remote Procedure Call.
RFC 5531: https://tools.ietf.org/html/rfc5531
Attributes:
__hdr__: Header fields of RPC.
xid: (int): Transaction identifier. (4 bytes)
dir: (int): Message direction, CALL or REPLY. (4 bytes)
"""
__hdr__ = (
('xid', 'I', 0),
('dir', 'I', CALL)
)
class Auth(dpkt.Packet):
__hdr__ = (('flavor', 'I', AUTH_NONE), )
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
n = struct.unpack('>I', self.data[:4])[0]
self.data = self.data[4:4 + n]
def __len__(self):
return 8 + len(self.data)
def __bytes__(self):
return self.pack_hdr() + struct.pack('>I', len(self.data)) + bytes(self.data)
class Call(dpkt.Packet):
__hdr__ = (
('rpcvers', 'I', 2),
('prog', 'I', 0),
('vers', 'I', 0),
('proc', 'I', 0)
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.cred = RPC.Auth(self.data)
self.verf = RPC.Auth(self.data[len(self.cred):])
self.data = self.data[len(self.cred) + len(self.verf):]
def __len__(self):
return len(bytes(self)) # XXX
def __bytes__(self):
# wire order is header, credential, verifier, then call arguments;
# pack_hdr() is used here so self.data is not serialized twice
return self.pack_hdr() + \
bytes(getattr(self, 'cred', RPC.Auth())) + \
bytes(getattr(self, 'verf', RPC.Auth())) + \
bytes(self.data)
class Reply(dpkt.Packet):
__hdr__ = (('stat', 'I', MSG_ACCEPTED), )
class Accept(dpkt.Packet):
__hdr__ = (('stat', 'I', SUCCESS), )
def unpack(self, buf):
self.verf = RPC.Auth(buf)
buf = buf[len(self.verf):]
self.stat = struct.unpack('>I', buf[:4])[0]
if self.stat == SUCCESS:
self.data = buf[4:]
elif self.stat == PROG_MISMATCH:
self.low, self.high = struct.unpack('>II', buf[4:12])
self.data = buf[12:]
def __len__(self):
if self.stat == PROG_MISMATCH:
n = 8
else:
n = 0
return len(self.verf) + 4 + n + len(self.data)
def __bytes__(self):
if self.stat == PROG_MISMATCH:
return bytes(self.verf) + \
struct.pack('>III', self.stat, self.low, self.high) + self.data
return bytes(self.verf) + dpkt.Packet.__bytes__(self)
class Reject(dpkt.Packet):
__hdr__ = (('stat', 'I', AUTH_ERROR), )
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
if self.stat == RPC_MISMATCH:
self.low, self.high = struct.unpack('>II', self.data[:8])
self.data = self.data[8:]
elif self.stat == AUTH_ERROR:
self.why = struct.unpack('>I', self.data[:4])[0]
self.data = self.data[4:]
def __len__(self):
if self.stat == RPC_MISMATCH:
n = 8
elif self.stat == AUTH_ERROR:
n = 4
else:
n = 0
return 4 + n + len(self.data)
def __bytes__(self):
if self.stat == RPC_MISMATCH:
return struct.pack('>III', self.stat, self.low, self.high) + self.data
elif self.stat == AUTH_ERROR:
return struct.pack('>II', self.stat, self.why) + self.data
return dpkt.Packet.__bytes__(self)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
if self.stat == MSG_ACCEPTED:
self.data = self.accept = self.Accept(self.data)
elif self.stat == MSG_DENIED:
self.data = self.reject = self.Reject(self.data)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
if self.dir == CALL:
self.data = self.call = self.Call(self.data)
elif self.dir == REPLY:
self.data = self.reply = self.Reply(self.data)
def unpack_xdrlist(cls, buf):
l_ = []
while buf:
if buf.startswith(b'\x00\x00\x00\x01'):
p = cls(buf[4:])
l_.append(p)
buf = p.data
elif buf.startswith(b'\x00\x00\x00\x00'):
break
else:
raise dpkt.UnpackError('invalid XDR list')
return l_
def pack_xdrlist(*args):
return b'\x00\x00\x00\x01'.join(map(bytes, args)) + b'\x00\x00\x00\x00'
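# Illustrative sketch: building a minimal RPC CALL message by hand and parsing
# it back; the xid and program/procedure numbers are arbitrary example values.
def _example_rpc_call():
    call = RPC.Call(rpcvers=2, prog=100000, vers=2, proc=3)
    msg = RPC(xid=0x1234, dir=CALL)
    msg.data = msg.call = call
    buf = bytes(msg)
    parsed = RPC(buf)
    assert parsed.dir == CALL
    assert parsed.call.prog == 100000
    assert bytes(parsed) == buf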
def test_auth():
from binascii import unhexlify
auth1 = RPC.Auth()
assert auth1.flavor == AUTH_NONE
buf = unhexlify('0000000000000000')
assert bytes(auth1) == buf
auth2 = RPC.Auth(buf)
assert auth2.flavor == AUTH_NONE
assert len(auth2) == 8
def test_call():
from binascii import unhexlify
call1 = RPC.Call()
assert call1.rpcvers == 2
assert call1.prog == 0
assert call1.vers == 0
assert call1.proc == 0
buf = unhexlify(
'0000000200000000000000000000000000000000000000000000000000000000'
)
assert bytes(call1) == buf
call2 = RPC.Call(buf)
assert call2.rpcvers == 2
assert call2.prog == 0
assert call2.vers == 0
assert call2.proc == 0
assert len(call2) == 32
assert bytes(call2) == buf
def test_reply():
from binascii import unhexlify
reply1 = RPC.Reply()
assert reply1.stat == MSG_ACCEPTED
assert bytes(reply1) == b'\00' * 4
buf_accepted = unhexlify(
'00000000' # MSG_ACCEPTED
'0000000000000000' # Auth
'00000000' # SUCCESS
'0000000000000000' # Auth
)
reply_accepted = RPC.Reply(buf_accepted)
assert reply_accepted.stat == MSG_ACCEPTED
assert bytes(reply_accepted) == buf_accepted
assert len(reply_accepted) == 24
buf_denied = unhexlify(
'00000001' # MSG_DENIED
'00000000' # RPC_MISMATCH
'00000000' # low
'FFFFFFFF' # high
'0000000000000000' # Auth
)
reply_denied = RPC.Reply(buf_denied)
assert reply_denied.stat == MSG_DENIED
assert bytes(reply_denied) == buf_denied
assert len(reply_denied) == 24
def test_accept():
from binascii import unhexlify
accept1 = RPC.Reply.Accept()
assert accept1.stat == SUCCESS
buf_success = unhexlify(
'0000000000000000' # Auth
'00000000' # SUCCESS
'0000000000000000' # Auth
)
accept_success = RPC.Reply.Accept(buf_success)
assert accept_success.stat == SUCCESS
assert len(accept_success) == 20
assert bytes(accept_success) == buf_success
buf_prog_mismatch = unhexlify(
'0000000000000000' # Auth
'00000002' # PROG_MISMATCH
'0000000000000000' # Auth
)
accept_prog_mismatch = RPC.Reply.Accept(buf_prog_mismatch)
assert accept_prog_mismatch.stat == PROG_MISMATCH
assert len(accept_prog_mismatch) == 20
assert bytes(accept_prog_mismatch) == buf_prog_mismatch
def test_reject():
from binascii import unhexlify
reject1 = RPC.Reply.Reject()
assert reject1.stat == AUTH_ERROR
buf_rpc_mismatch = unhexlify(
'00000000' # RPC_MISMATCH
'00000000' # low
'FFFFFFFF' # high
'0000000000000000' # Auth
)
reject2 = RPC.Reply.Reject(buf_rpc_mismatch)
assert bytes(reject2) == buf_rpc_mismatch
assert reject2.low == 0
assert reject2.high == 0xffffffff
assert len(reject2) == 20
buf_auth_error = unhexlify(
'00000001' # AUTH_ERROR
'00000000' # low
'FFFFFFFF' # high
'0000000000000000' # Auth
)
reject3 = RPC.Reply.Reject(buf_auth_error)
assert bytes(reject3) == buf_auth_error
assert len(reject3) == 20
buf_other = unhexlify(
'00000002' # NOT IMPLEMENTED
'00000000' # low
'FFFFFFFF' # high
'0000000000000000' # Auth
)
reject4 = RPC.Reply.Reject(buf_other)
assert bytes(reject4) == buf_other
assert len(reject4) == 20
def test_rpc():
from binascii import unhexlify
rpc = RPC()
assert rpc.xid == 0
assert rpc.dir == CALL
buf_call = unhexlify(
'00000000' # xid
'00000000' # CALL
'0000000200000000000000000000000000000000000000000000000000000000'
)
rpc_call = RPC(buf_call)
assert bytes(rpc_call) == buf_call
buf_reply = unhexlify(
'00000000' # xid
'00000001' # REPLY
'00000000' # MSG_ACCEPTED
'0000000000000000' # Auth
'00000000' # SUCCESS
'0000000000000000' # Auth
)
rpc_reply = RPC(buf_reply)
assert bytes(rpc_reply) == buf_reply
| 9,475 | 27.035503 | 90 |
py
|
dpkt
|
dpkt-master/dpkt/utils.py
|
"""Various Utility Functions"""
import socket
import warnings
from .compat import compat_ord
from .dns import DNS
def mac_to_str(address):
r"""Convert a MAC address to a readable/printable string
Args:
address (str): a MAC address in hex form (e.g. '\x01\x02\x03\x04\x05\x06')
Returns:
str: Printable/readable MAC address
"""
return ':'.join('%02x' % compat_ord(b) for b in address)
def inet_to_str(inet):
"""Convert inet object to a string
Args:
inet (inet struct): inet network address
Returns:
str: Printable/readable IP address
"""
# First try ipv4 and then ipv6
try:
return socket.inet_ntop(socket.AF_INET, inet)
except ValueError:
return socket.inet_ntop(socket.AF_INET6, inet)
def make_dict(obj):
"""Create a dictionary out of a non-builtin object"""
# Recursion base case
if is_builtin(obj):
return obj
output_dict = {}
for key in dir(obj):
if not key.startswith('__') and not callable(getattr(obj, key)):
attr = getattr(obj, key)
if isinstance(attr, list):
output_dict[key] = []
for item in attr:
output_dict[key].append(make_dict(item))
else:
output_dict[key] = make_dict(attr)
return output_dict
def is_builtin(obj):
return obj.__class__.__module__ in ['__builtin__', 'builtins']
def deprecation_warning(*args):
"""print a deprecation warning"""
warnings.warn(*args, stacklevel=2)
def test_utils():
"""Test the utility methods"""
from binascii import unhexlify
from pprint import pprint
print(mac_to_str(b'\x01\x02\x03\x04\x05\x06'))
assert mac_to_str(b'\x01\x02\x03\x04\x05\x06') == '01:02:03:04:05:06'
print(inet_to_str(b'\x91\xfe\xa0\xed'))
assert inet_to_str(b'\x91\xfe\xa0\xed') == '145.254.160.237'
ipv6_inet = b' \x01\r\xb8\x85\xa3\x00\x00\x00\x00\x8a.\x03ps4'
assert inet_to_str(ipv6_inet) == '2001:db8:85a3::8a2e:370:7334'
# Test the make_dict method with a DNS response packet
a_resp = unhexlify("059c8180000100010000000106676f6f676c6503636f6d0000010001c00c00010"
"0010000012b0004d83ace2e0000290200000000000000")
my_dns = DNS(a_resp)
pprint(make_dict(my_dns))
| 2,354 | 28.4375 | 90 |
py
|
dpkt
|
dpkt-master/dpkt/stun.py
|
# $Id: stun.py 47 2008-05-27 02:10:00Z jon.oberheide $
# -*- coding: utf-8 -*-
"""Simple Traversal of UDP through NAT."""
from __future__ import print_function
from __future__ import absolute_import
import struct
from . import dpkt
# STUN - RFC 3489
# http://tools.ietf.org/html/rfc3489
# Each packet has a 20 byte header followed by 0 or more attribute TLVs.
# Message Types
BINDING_REQUEST = 0x0001
BINDING_RESPONSE = 0x0101
BINDING_ERROR_RESPONSE = 0x0111
SHARED_SECRET_REQUEST = 0x0002
SHARED_SECRET_RESPONSE = 0x0102
SHARED_SECRET_ERROR_RESPONSE = 0x0112
# Message Attributes
MAPPED_ADDRESS = 0x0001
RESPONSE_ADDRESS = 0x0002
CHANGE_REQUEST = 0x0003
SOURCE_ADDRESS = 0x0004
CHANGED_ADDRESS = 0x0005
USERNAME = 0x0006
PASSWORD = 0x0007
MESSAGE_INTEGRITY = 0x0008
ERROR_CODE = 0x0009
UNKNOWN_ATTRIBUTES = 0x000a
REFLECTED_FROM = 0x000b
class STUN(dpkt.Packet):
"""Simple Traversal of UDP through NAT.
STUN - RFC 3489
http://tools.ietf.org/html/rfc3489
Each packet has a 20 byte header followed by 0 or more attribute TLVs.
Attributes:
__hdr__: Header fields of STUN.
type: (int): STUN Message Type (2 bytes)
len: (int): Message Length (2 bytes)
xid: (bytes): Magic Cookie and Transaction ID (16 bytes)
"""
__hdr__ = (
('type', 'H', 0),
('len', 'H', 0),
('xid', '16s', 0)
)
def tlv(buf):
"""Parse one attribute; return (type, length, value, remaining buffer), with values padded to a 4-byte boundary."""
n = 4
t, l_ = struct.unpack('>HH', buf[:n])
v = buf[n:n + l_]
pad = (n - l_ % n) % n
buf = buf[n + l_ + pad:]
return t, l_, v, buf
def parse_attrs(buf):
"""Parse STUN.data buffer into a list of (attribute, data) tuples."""
attrs = []
while buf:
t, _, v, buf = tlv(buf)
attrs.append((t, v))
return attrs
def test_stun_response():
s = (b'\x01\x01\x00\x0c\x21\x12\xa4\x42\x53\x4f\x70\x43\x69\x69\x35\x4a\x66\x63\x31\x7a\x00\x01'
b'\x00\x08\x00\x01\x11\x22\x33\x44\x55\x66')
m = STUN(s)
assert m.type == BINDING_RESPONSE
assert m.len == 12
attrs = parse_attrs(m.data)
assert attrs == [(MAPPED_ADDRESS, b'\x00\x01\x11\x22\x33\x44\x55\x66'), ]
def test_stun_padded():
s = (b'\x00\x01\x00\x54\x21\x12\xa4\x42\x35\x59\x53\x6e\x42\x71\x70\x56\x77\x61\x39\x4f\x00\x06'
b'\x00\x17\x70\x4c\x79\x5a\x48\x52\x3a\x47\x77\x4c\x33\x41\x48\x42\x6f\x76\x75\x62\x4c\x76'
b'\x43\x71\x6e\x00\x80\x2a\x00\x08\x18\x8b\x10\x4c\x69\x7b\xf6\x5b\x00\x25\x00\x00\x00\x24'
b'\x00\x04\x6e\x00\x1e\xff\x00\x08\x00\x14\x60\x2b\xc7\xfc\x0d\x10\x63\xaa\xc5\x38\x1c\xcb'
b'\x96\xa9\x73\x08\x73\x9a\x96\x0c\x80\x28\x00\x04\xd1\x62\xea\x65')
m = STUN(s)
assert m.type == BINDING_REQUEST
assert m.len == 84
attrs = parse_attrs(m.data)
assert len(attrs) == 6
assert attrs[0] == (USERNAME, b'pLyZHR:GwL3AHBovubLvCqn')
assert attrs[4][0] == MESSAGE_INTEGRITY
| 2,884 | 27.564356 | 100 |
py
|
dpkt
|
dpkt-master/dpkt/tpkt.py
|
# $Id: tpkt.py 23 2006-11-08 15:45:33Z dugsong $
# -*- coding: utf-8 -*-
"""ISO Transport Service on top of the TCP (TPKT)."""
from __future__ import absolute_import
from . import dpkt
# TPKT - RFC 1006 Section 6
# http://www.faqs.org/rfcs/rfc1006.html
class TPKT(dpkt.Packet):
"""ISO Transport Service on top of the TCP (TPKT).
"Emulate" ISO transport services COTP on top of TCP. The two major points missing in TCP (compared to COTP)
are the TSAP addressing and the detection of packet boundaries on the receiving host.
Attributes:
__hdr__: Header fields of TPKT.
v: (int): Version (1 byte)
rsvd: (int): Reserved (1 byte)
len: (int): Packet Length (2 bytes)
"""
__hdr__ = (
('v', 'B', 3),
('rsvd', 'B', 0),
('len', 'H', 0)
)
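# Illustrative sketch: a TPKT header in front of an arbitrary example payload;
# the len field covers the 4-byte TPKT header plus the payload.
def _example_tpkt_roundtrip():
    payload = b'\x02\xf0\x80'  # arbitrary example bytes
    t = TPKT(len=4 + len(payload))
    t.data = payload
    buf = bytes(t)
    assert buf[:4] == b'\x03\x00\x00\x07'
    assert TPKT(buf).len == len(buf) == 7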
| 831 | 26.733333 | 111 |
py
|
dpkt
|
dpkt-master/dpkt/netflow.py
|
# $Id: netflow.py 23 2006-11-08 15:45:33Z dugsong $
# -*- coding: utf-8 -*-
"""Cisco Netflow."""
from __future__ import print_function
from __future__ import absolute_import
import struct
from . import dpkt
from .compat import compat_izip
class NetflowBase(dpkt.Packet):
"""Base class for Cisco Netflow packets.
NetFlow is a feature that was introduced on Cisco routers around 1996 that provides the ability to collect IP
network traffic as it enters or exits an interface. By analyzing the data provided by NetFlow, a network
administrator can determine things such as the source and destination of traffic, class of service, and the causes
of congestion.
https://www.ciscopress.com/articles/article.asp?p=2812391&seqNum=3
Attributes:
__hdr__: Header fields of NetflowBase.
"""
__hdr__ = (
('version', 'H', 1),
('count', 'H', 0),
('sys_uptime', 'I', 0),
('unix_sec', 'I', 0),
('unix_nsec', 'I', 0)
)
def __len__(self):
return self.__hdr_len__ + (len(self.data[0]) * self.count)
def __bytes__(self):
# for now, don't try to enforce any size limits
self.count = len(self.data)
return self.pack_hdr() + b''.join(map(bytes, self.data))
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
buf = self.data
l_ = []
while buf:
flow = self.NetflowRecord(buf)
l_.append(flow)
buf = buf[len(flow):]
self.data = l_
class NetflowRecordBase(dpkt.Packet):
"""Base class for netflow v1-v7 netflow records.
Provides size and packing optimizations shared by all record types: records
are header-only structures with no trailing data.
Attributes:
__hdr__: Header fields of NetflowRecordBase (defined by the subclasses).
"""
# performance optimizations
def __len__(self):
# don't bother with data
return self.__hdr_len__
def __bytes__(self):
# don't bother with data
return self.pack_hdr()
def unpack(self, buf):
# don't bother with data
for k, v in compat_izip(self.__hdr_fields__,
struct.unpack(self.__hdr_fmt__, buf[:self.__hdr_len__])):
setattr(self, k, v)
self.data = b""
class Netflow1(NetflowBase):
"""Netflow Version 1.
The original NetFlow export format: a fixed header followed by a series of
fixed-size IPv4 flow records.
Attributes:
__hdr__: Header fields of Netflow Version 1.
"""
class NetflowRecord(NetflowBase.NetflowRecordBase):
"""Netflow v1 flow record.
Attributes:
__hdr__: Header fields of a Netflow Version 1 flow record.
"""
__hdr__ = (
('src_addr', 'I', 0),
('dst_addr', 'I', 0),
('next_hop', 'I', 0),
('input_iface', 'H', 0),
('output_iface', 'H', 0),
('pkts_sent', 'I', 0),
('bytes_sent', 'I', 0),
('start_time', 'I', 0),
('end_time', 'I', 0),
('src_port', 'H', 0),
('dst_port', 'H', 0),
('pad1', 'H', 0),
('ip_proto', 'B', 0),
('tos', 'B', 0),
('tcp_flags', 'B', 0),
('pad2', 'B', 0),
('pad3', 'H', 0),
('reserved', 'I', 0)
)
# FYI, versions 2-4 don't appear to have ever seen the light of day.
class Netflow5(NetflowBase):
"""Netflow Version 5.
Popular NetFlow version on many routers from different vendors. Limited to IPv4 flows.
Attributes:
__hdr__: Header fields of Netflow Version 5.
"""
__hdr__ = NetflowBase.__hdr__ + (
('flow_sequence', 'I', 0),
('engine_type', 'B', 0),
('engine_id', 'B', 0),
('reserved', 'H', 0),
)
class NetflowRecord(NetflowBase.NetflowRecordBase):
"""Netflow v5 flow record.
Attributes:
__hdr__: Header fields of Netflow Version 5 flow record.
"""
__hdr__ = (
('src_addr', 'I', 0),
('dst_addr', 'I', 0),
('next_hop', 'I', 0),
('input_iface', 'H', 0),
('output_iface', 'H', 0),
('pkts_sent', 'I', 0),
('bytes_sent', 'I', 0),
('start_time', 'I', 0),
('end_time', 'I', 0),
('src_port', 'H', 0),
('dst_port', 'H', 0),
('pad1', 'B', 0),
('tcp_flags', 'B', 0),
('ip_proto', 'B', 0),
('tos', 'B', 0),
('src_as', 'H', 0),
('dst_as', 'H', 0),
('src_mask', 'B', 0),
('dst_mask', 'B', 0),
('pad2', 'H', 0),
)
class Netflow6(NetflowBase):
"""Netflow Version 6.
(Obsolete.) No longer supported by Cisco, but may be found in the field.
Attributes:
__hdr__: Header fields of Netflow Version 6.
"""
__hdr__ = Netflow5.__hdr__
class NetflowRecord(NetflowBase.NetflowRecordBase):
"""Netflow v6 flow record.
Attributes:
__hdr__: Header fields of Netflow Version 6 flow record.
"""
__hdr__ = (
('src_addr', 'I', 0),
('dst_addr', 'I', 0),
('next_hop', 'I', 0),
('input_iface', 'H', 0),
('output_iface', 'H', 0),
('pkts_sent', 'I', 0),
('bytes_sent', 'I', 0),
('start_time', 'I', 0),
('end_time', 'I', 0),
('src_port', 'H', 0),
('dst_port', 'H', 0),
('pad1', 'B', 0),
('tcp_flags', 'B', 0),
('ip_proto', 'B', 0),
('tos', 'B', 0),
('src_as', 'H', 0),
('dst_as', 'H', 0),
('src_mask', 'B', 0),
('dst_mask', 'B', 0),
('in_encaps', 'B', 0),
('out_encaps', 'B', 0),
('peer_nexthop', 'I', 0),
)
class Netflow7(NetflowBase):
"""Netflow Version 7.
(Obsolete.) Like version 5, with a source router field.
Attributes:
__hdr__: Header fields of Netflow Version 7.
"""
__hdr__ = NetflowBase.__hdr__ + (
('flow_sequence', 'I', 0),
('reserved', 'I', 0),
)
class NetflowRecord(NetflowBase.NetflowRecordBase):
"""Netflow v6 flow record.
TODO: Longer class information....
Attributes:
__hdr__: Header fields of Netflow Version 6 flow record.
TODO.
"""
__hdr__ = (
('src_addr', 'I', 0),
('dst_addr', 'I', 0),
('next_hop', 'I', 0),
('input_iface', 'H', 0),
('output_iface', 'H', 0),
('pkts_sent', 'I', 0),
('bytes_sent', 'I', 0),
('start_time', 'I', 0),
('end_time', 'I', 0),
('src_port', 'H', 0),
('dst_port', 'H', 0),
('flags', 'B', 0),
('tcp_flags', 'B', 0),
('ip_proto', 'B', 0),
('tos', 'B', 0),
('src_as', 'H', 0),
('dst_as', 'H', 0),
('src_mask', 'B', 0),
('dst_mask', 'B', 0),
('pad2', 'H', 0),
('router_sc', 'I', 0),
)
# No support for v8 or v9 yet.
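# Illustrative sketch: iterating parsed flow records; the address conversion
# below is local example code, not part of this module.
def _example_print_flows(nf):
    import socket
    for rec in nf.data:
        src = socket.inet_ntoa(struct.pack('>I', rec.src_addr))
        dst = socket.inet_ntoa(struct.pack('>I', rec.dst_addr))
        print(src, rec.src_port, '->', dst, rec.dst_port, rec.bytes_sent)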
def test_net_flow_v1_unpack():
from binascii import unhexlify
__sample_v1 = unhexlify(
'00010018677a613c4200fc1c24930870ac012057c0a863f70a0002010003000a0000000100000228677a372c677a372c5c1b0050ac01112c10000'
'0000004001bac011853ac18d9aac0a832020003001900000001000005dc677a377c677a377cd8e30050ac01062c100000000004001bac011418ac'
'188dcdc0a832660003000700000001000005dc677a3790677a37908a81176fac0106361000000000040003ac0f2724ac01e51dc0a832060004001'
'b0000000100000228677a3a38677a3a38a3511236ac2906fd180000000004001bac011645ac23178ec0a832060003001b0000000100000228677a'
'3a4c677a3a4cc9ff0050ac1f0686020000000003001bac0d09ffac019995c0a832060004001b00000001000005dc677a3a58677a3a58ee390017a'
'c0106de1000000000040003ac0e4ad8ac01ae2fc0a832060004001b00000001000005dc677a3a68677a3a68b36e0015ac01068110000000000400'
'1bac012338ac01d92ac0a832060003001b00000001000005dc677a3a74677a3a7400008350ac2101ab100000000003001bac0a6037ac2a934ac0a'
'832060004001b00000001000005dc677a3a74677a3a7400000000ac0132a91000000000040007ac0a471fac01fd4ac0a832060004001b00000001'
'00000028677a3a88677a3a8821996987ac1e067e020000000003001bac0128c9ac0142c4c0a83202000300190000000100000028677a3a88677a3'
'a887d360050ac0106fe100000000004001bac0b08e8ac0146e2c0a832020004001900000001000005dc677a3a9c677a3a9c60696987ac01063b10'
'0000000004001bac011d24ac3cf0c3c0a832060003001b00000001000005dc677a3a9c677a3a9c46320014ac0106731800000000040003ac0b115'
'1ac01de06c0a832060004001b00000001000005dc677a3ab0677a3ab0ef231a2bac2906e9100000000004001bac0c52d9ac016fe8c0a832020004'
'001900000001000005dc677a3ac4677a3ac4136e006eac1906a81000000000030019ac013dddac017deec0a832660003000700000001000000286'
'77a3ac4677a3ac40000dcbbac0101d3100000000004001bac0f28d1ac01cca5c0a832060004001b00000001000005dc677a3ad8677a3ad8c57317'
'6fac1906231800000000030007ac0a855bc0a8636e0a0002010004000a00000001000005dc677a3ae4677a3ae4bf6c0050ac0106cf10000000000'
'40007ac01301fac182145c0a832660003000700000001000005dc677a3b00677a3b00119504bec0a806ea100000000003000aac0130b6ac1ef4aa'
'c0a832060003001b00000001000005dc677a3b34677a3b3488640017ac01061f100000000004001bac01235fac1eb009c0a832060003001b00000'
'001000005dc677a3b48677a3b4881530050ac20064e100000000003001bac0104d9ac019463c0a832060003001b0000000100000228677a3b5c67'
'7a3b5c55100050ac010650180000000004001bac013caeac2aac21c0a832060003001b00000001000000fa677a3b84677a3b840ce70050ac0111f'
'd100000000004001bac011f1fac17ed69c0a832020003001900000001000005dc677a3b98677a3b98ba170016ac01067c1000000000030007'
)
nf = Netflow1(__sample_v1)
assert len(nf.data) == 24
# print repr(nfv1)
def test_net_flow_v5_unpack():
from binascii import unhexlify
buf_nf5_header = unhexlify(
'0005001db5fac9d03a0b4142265677de9b73763100010000'
)
buf_nf5_records = list(map(unhexlify, (
'ac0a86a6ac01aaf7c0a83232027100690000000100000228b5fa8114b5fa811435320050000006000000000000000000',
'ac019144ac1443e4c0a83216006902710000000100000028b5fa9bbdb5fa9bbd005085d7000006000000000000000000',
'ac17e2d7ac018c56c0a832320271006900000001000005dcb5fa6fb8b5fa6fb876e8176f000006000000000000000000',
'ac0ef2e5ac0191b2c0a832320271006900000001000000fab5fa81eeb5fa81eed0eb0015000006000000000000000000',
'ac0a436aac29a7090a000201027100db0000000100000228b5fa8592b5fa85928cb00035000006000000000000000000',
'ac01963dac151aa8c0a832160069027100000001000005dcb5fa86e0b5fa86e0b4e700c2000006000000000000000000',
'ac0156d1ac018615c0a832320271006900000001000005dcb5fa7d3ab5fa7d3a5b510050000006000000000000000000',
'ac32f1b1ac2919ca0a000201027100db00000001000005dcb5fa83c3b5fa83c3162c0015000006000000000000000000',
'ac0c4134ac019a7ac0a832320271006900000001000005dcb5fa8da7b5fa8da717330015000006000000000000000000',
'ac1ed284ac29d8d20a000201027100db00000001000005dcb5fa8e97b5fa8e97372a176f000006000000000000000000',
'ac01854aac2011fcc0a83216006902710000000100000228b5fa8834b5fa8834f5dd008f000006000000000000000000',
'ac010480ac3c5b6e0a000201027100db00000001000005dcb5fa9d72b5fa9d7273240016000006000000000000000000',
'ac01b94aac22c9d7c0a83216006902710000000100000028b5fa9072b5fa90720f8d00c2000006000000000000000000',
'ac2aa310ac01b419c0a83232027100690000000100000028b5fa9203b5fa920370660015000006000000000000000000',
'ac01ab6fac1e7f69c0a832160069027100000001000005dcb5fa937fb5fa937f00500b98000006000000000000000000',
'ac0c0aeaac01a115c0a832320271006900000001000005dcb5fa79cfb5fa79cf5b3317e0000006000000000000000000',
'ac01bbb3ac29758c0a000201006900db00000001000000fab5fa9433b5fa943300501eca000006000000000000000000',
'ac0f4a60ac01ab94c0a83232027100690000000100000228b5fa875bb5fa875b9ad62fab000006000000000000000000',
'ac2a0f93ac01b8a3c0a83232027100690000000100000028b5fa89bbb5fa89bb6ee10050000006000000000000000000',
'ac0193a1ac16800cc0a83216006902710000000100000028b5fa8726b5fa872600000000000001000000000000000000',
'ac01835aac1f52cdc0a832160069027100000001000005dcb5fa900db5fa900df72a008a000006000000000000000000',
'ac0ce0adac01a856c0a832320271006900000001000005dcb5fa9cf6b5fa9cf6e57c1a2b000006000000000000000000',
'ac1ecc54ac3c78260a000201027100db00000001000005dcb5fa80eab5fa80ea0000000000002f000000000000000000',
'ac01bb18ac017c7ac0a832160069027100000001000000fab5fa8870b5fa887000500b7d000006000000000000000000',
'ac170e72ac018fddc0a83232027100690000000100000228b5fa89f7b5fa89f70df7008a000006000000000000000000',
'ac0abb04ac3cb0150a000201027100db00000001000005dcb5fa90a9b5fa90a99cd0008f000006000000000000000000',
'ac0a7a3fac2903c80a000201027100db00000001000005dcb5fa7565b5fa7565eea60050000006000000000000000000',
'ac01b505c0a8639f0a000201006900db00000001000005dcb5fa7bc7b5fa7bc7005086a9000006000000000000000000',
'ac32a51bac2930bf0a000201027100db00000001000000fab5fa9b5ab5fa9b5a43f917e0000006000000000000000000',
)))
buf_input = buf_nf5_header + b''.join(buf_nf5_records)
nf = Netflow5(buf_input)
assert nf.version == 5
assert nf.count == 29
assert nf.sys_uptime == 3053111760
assert nf.unix_sec == 973816130
assert len(nf) == len(buf_input)
assert bytes(nf) == buf_input
assert len(nf.data) == 29
for idx, record in enumerate(nf.data):
assert bytes(record) == buf_nf5_records[idx]
assert len(record) == 48
| 13,857 | 39.052023 | 127 |
py
|
dpkt
|
dpkt-master/dpkt/yahoo.py
|
# $Id: yahoo.py 23 2006-11-08 15:45:33Z dugsong $
# -*- coding: utf-8 -*-
"""Yahoo Messenger."""
from __future__ import absolute_import
from . import dpkt
class YHOO(dpkt.Packet):
"""Yahoo Messenger.
Yahoo! Messenger (sometimes abbreviated Y!M) was an advertisement-supported instant messaging client and associated
protocol provided by Yahoo!. Yahoo! Messenger was provided free of charge and could be downloaded and used with a
generic "Yahoo ID" which also allowed access to other Yahoo! services, such as Yahoo! Mail. The service also
offered VoIP, file transfers, webcam hosting, a text messaging service, and chat rooms in various categories.
Attributes:
__hdr__: Header fields of Yahoo Messenger.
version: (bytes): Version. (8 bytes)
length: (int): Length. (4 bytes)
service: (int): Service. (4 bytes)
connid: (int): Connection ID. (4 bytes)
magic: (int): Magic. (4 bytes)
unknown: (int): Unknown. (4 bytes)
type: (int): Type. (4 bytes)
nick1: (bytes): Nick1. (36 bytes)
nick2: (bytes): Nick2. (36 bytes)
"""
__hdr__ = [
('version', '8s', b' ' * 8),  # bytes default so the header packs on Python 3
('length', 'I', 0),
('service', 'I', 0),
('connid', 'I', 0),
('magic', 'I', 0),
('unknown', 'I', 0),
('type', 'I', 0),
('nick1', '36s', b' ' * 36),
('nick2', '36s', b' ' * 36)
]
__byte_order__ = '<'
class YMSG(dpkt.Packet):
__hdr__ = [
('version', '8s', b' ' * 8),
('length', 'H', 0),
('type', 'H', 0),
('unknown1', 'I', 0),
('unknown2', 'I', 0)
]
| 1,688 | 31.480769 | 119 |
py
|
dpkt
|
dpkt-master/dpkt/gzip.py
|
# $Id: gzip.py 23 2006-11-08 15:45:33Z dugsong $
# -*- coding: utf-8 -*-
"""GNU zip."""
from __future__ import print_function
from __future__ import absolute_import
import struct
import zlib
from . import dpkt
# RFC 1952
GZIP_MAGIC = b'\x1f\x8b'
# Compression methods
GZIP_MSTORED = 0
GZIP_MCOMPRESS = 1
GZIP_MPACKED = 2
GZIP_MLZHED = 3
GZIP_MDEFLATE = 8
# Flags
GZIP_FTEXT = 0x01
GZIP_FHCRC = 0x02
GZIP_FEXTRA = 0x04
GZIP_FNAME = 0x08
GZIP_FCOMMENT = 0x10
GZIP_FENCRYPT = 0x20
GZIP_FRESERVED = 0xC0
# OS
GZIP_OS_MSDOS = 0
GZIP_OS_AMIGA = 1
GZIP_OS_VMS = 2
GZIP_OS_UNIX = 3
GZIP_OS_VMCMS = 4
GZIP_OS_ATARI = 5
GZIP_OS_OS2 = 6
GZIP_OS_MACOS = 7
GZIP_OS_ZSYSTEM = 8
GZIP_OS_CPM = 9
GZIP_OS_TOPS20 = 10
GZIP_OS_WIN32 = 11
GZIP_OS_QDOS = 12
GZIP_OS_RISCOS = 13
GZIP_OS_UNKNOWN = 255
GZIP_FENCRYPT_LEN = 12
class GzipExtra(dpkt.Packet):
__byte_order__ = '<'
__hdr__ = (
('id', '2s', b''),
('len', 'H', 0)
)
class Gzip(dpkt.Packet):
__byte_order__ = '<'
__hdr__ = (
('magic', '2s', GZIP_MAGIC),
('method', 'B', GZIP_MDEFLATE),
('flags', 'B', 0),
('mtime', 'I', 0),
('xflags', 'B', 0),
('os', 'B', GZIP_OS_UNIX),
)
def __init__(self, *args, **kwargs):
self.extra = None
self.filename = None
self.comment = None
super(Gzip, self).__init__(*args, **kwargs)
def unpack(self, buf):
super(Gzip, self).unpack(buf)
if self.flags & GZIP_FEXTRA:
if len(self.data) < 2:
raise dpkt.NeedData('Gzip extra')
n = struct.unpack('<H', self.data[:2])[0]
if len(self.data) < 2 + n:
raise dpkt.NeedData('Gzip extra')
self.extra = GzipExtra(self.data[2:2 + n])
self.data = self.data[2 + n:]
if self.flags & GZIP_FNAME:
n = self.data.find(b'\x00')
if n == -1:
raise dpkt.NeedData('Gzip end of file name not found')
self.filename = self.data[:n].decode('utf-8')
self.data = self.data[n + 1:]
if self.flags & GZIP_FCOMMENT:
n = self.data.find(b'\x00')
if n == -1:
raise dpkt.NeedData('Gzip end of comment not found')
self.comment = self.data[:n]
self.data = self.data[n + 1:]
if self.flags & GZIP_FENCRYPT:
if len(self.data) < GZIP_FENCRYPT_LEN:
raise dpkt.NeedData('Gzip encrypt')
self.data = self.data[GZIP_FENCRYPT_LEN:] # XXX - skip
if self.flags & GZIP_FHCRC:
if len(self.data) < 2:
raise dpkt.NeedData('Gzip hcrc')
self.data = self.data[2:] # XXX - skip
def pack_hdr(self):
l_ = []
if self.extra:
self.flags |= GZIP_FEXTRA
s = bytes(self.extra)
l_.append(struct.pack('<H', len(s)))
l_.append(s)
if self.filename:
self.flags |= GZIP_FNAME
l_.append(self.filename.encode('utf-8'))
l_.append(b'\x00')
if self.comment:
self.flags |= GZIP_FCOMMENT
l_.append(self.comment)
l_.append(b'\x00')
l_.insert(0, super(Gzip, self).pack_hdr())
return b''.join(l_)
def compress(self):
"""Compress self.data."""
c = zlib.compressobj(
zlib.Z_BEST_COMPRESSION,
zlib.DEFLATED,
-zlib.MAX_WBITS,
zlib.DEF_MEM_LEVEL,
zlib.Z_DEFAULT_STRATEGY,
)
# compress() may buffer small inputs and emit them only on flush(), so keep
# the output of both calls; discarding the compress() output would lose data
# for larger payloads
self.data = c.compress(self.data) + c.flush()
def decompress(self):
"""Return decompressed payload."""
d = zlib.decompressobj(-zlib.MAX_WBITS)
return d.decompress(self.data)
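# Illustrative sketch: parsing a gzip member read from disk and recovering the
# original payload; 'example.gz' is a hypothetical path.
def _example_read_gzip(path='example.gz'):
    with open(path, 'rb') as f:
        g = Gzip(f.read())
    return g.filename, g.decompress()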
class TestGzip(object):
"""This data is created with the gzip command line tool"""
@classmethod
def setup_class(cls):
from binascii import unhexlify
cls.data = unhexlify(
b'1F8B' # magic
b'080880C185560003' # header
b'68656C6C6F2E74787400' # filename
b'F348CDC9C95728CF2FCA4951E40200' # data
b'41E4A9B20D000000' # checksum
)
cls.p = Gzip(cls.data)
def test_method(self):
assert (self.p.method == GZIP_MDEFLATE)
def test_flags(self):
assert (self.p.flags == GZIP_FNAME)
def test_mtime(self):
# Fri Jan 01 00:00:00 2016 UTC
assert (self.p.mtime == 0x5685c180)
def test_xflags(self):
assert (self.p.xflags == 0)
def test_os(self):
assert (self.p.os == GZIP_OS_UNIX)
def test_filename(self):
assert (self.p.filename == "hello.txt") # always str (utf-8)
def test_decompress(self):
assert (self.p.decompress() == b"Hello world!\n") # always bytes
def test_flags_extra():
import pytest
from binascii import unhexlify
buf = unhexlify(
'1F8B' # magic
'08' # method
'04' # flags (GZIP_FEXTRA)
'80C18556' # mtime
'00' # xflags
'03' # os
)
# not enough data to extract
with pytest.raises(dpkt.NeedData, match='Gzip extra'):
Gzip(buf)
buf += unhexlify('0400') # append the length of the fextra
# not enough data to extract in extra section
with pytest.raises(dpkt.NeedData, match='Gzip extra'):
Gzip(buf)
buf += unhexlify('494401000102')
gzip = Gzip(buf)
assert gzip.extra.id == b'ID'
assert gzip.extra.len == 1
assert gzip.data == unhexlify('0102')
assert bytes(gzip) == buf
def test_flags_filename():
import pytest
from binascii import unhexlify
buf = unhexlify(
'1F8B' # magic
'08' # method
'08' # flags (GZIP_FNAME)
'80C18556' # mtime
'00' # xflags
'03' # os
'68656C6C6F2E747874' # filename
)
# no trailing null character so unpacking fails
with pytest.raises(dpkt.NeedData, match='Gzip end of file name not found'):
Gzip(buf)
buf += unhexlify('00')
gzip = Gzip(buf)
assert gzip.filename == 'hello.txt'
assert gzip.data == b''
assert bytes(gzip) == buf
def test_flags_comment():
import pytest
from binascii import unhexlify
buf = unhexlify(
'1F8B' # magic
'08' # method
'10' # flags (GZIP_FCOMMENT)
'80C18556' # mtime
'00' # xflags
'03' # os
'68656C6C6F2E747874' # comment
)
# no trailing null character so unpacking fails
with pytest.raises(dpkt.NeedData, match='Gzip end of comment not found'):
Gzip(buf)
buf += unhexlify('00')
gzip = Gzip(buf)
assert gzip.comment == b'hello.txt'
assert gzip.data == b''
assert bytes(gzip) == buf
def test_flags_encrypt():
import pytest
from binascii import unhexlify
buf_header = unhexlify(
'1F8B' # magic
'08' # method
'20' # flags (GZIP_FENCRYPT)
'80C18556' # mtime
'00' # xflags
'03' # os
)
# not enough data
with pytest.raises(dpkt.NeedData, match='Gzip encrypt'):
Gzip(buf_header)
encrypted_buffer = unhexlify('0102030405060708090a0b0c')
data = unhexlify('0123456789abcdef')
gzip = Gzip(buf_header + encrypted_buffer + data)
assert gzip.data == data
assert bytes(gzip) == buf_header + data
def test_flags_hcrc():
import pytest
from binascii import unhexlify
buf_header = unhexlify(
'1F8B' # magic
'08' # method
'02' # flags (GZIP_FHCRC)
'80C18556' # mtime
'00' # xflags
'03' # os
)
# not enough data
with pytest.raises(dpkt.NeedData, match='Gzip hcrc'):
Gzip(buf_header)
hcrc = unhexlify('0102')
data = unhexlify('0123456789abcdef')
gzip = Gzip(buf_header + hcrc + data)
assert gzip.data == data
assert bytes(gzip) == buf_header + data
def test_compress():
from binascii import unhexlify
buf_header = unhexlify(
'1F8B' # magic
'08' # method
'00' # flags (NONE)
'80C18556' # mtime
'00' # xflags
'03' # os
)
plain_text = b'Hello world!\n'
compressed_text = unhexlify('F348CDC9C95728CF2FCA4951E40200')
gzip = Gzip(buf_header + plain_text)
assert gzip.data == plain_text
gzip.compress()
assert gzip.data == compressed_text
assert bytes(gzip) == buf_header + compressed_text
assert gzip.decompress() == plain_text
| 8,774 | 25.038576 | 79 |
py
|
dpkt
|
dpkt-master/dpkt/ethernet.py
|
# $Id: ethernet.py 65 2010-03-26 02:53:51Z dugsong $
# -*- coding: utf-8 -*-
"""
Ethernet II, LLC (802.3+802.2), LLC/SNAP, and Novell raw 802.3,
with automatic 802.1q, MPLS, PPPoE, and Cisco ISL decapsulation.
"""
from __future__ import print_function
from __future__ import absolute_import
import struct
from zlib import crc32
from . import dpkt
from . import llc
from .utils import mac_to_str
from .compat import compat_ord, iteritems, isstr
ETH_CRC_LEN = 4
ETH_HDR_LEN = 14
ETH_LEN_MIN = 64 # minimum frame length with CRC
ETH_LEN_MAX = 1518 # maximum frame length with CRC
ETH_MTU = (ETH_LEN_MAX - ETH_HDR_LEN - ETH_CRC_LEN)
ETH_MIN = (ETH_LEN_MIN - ETH_HDR_LEN - ETH_CRC_LEN)
# Ethernet payload types - http://standards.ieee.org/regauth/ethertype
ETH_TYPE_UNKNOWN = 0x0000
ETH_TYPE_EDP = 0x00bb # Extreme Networks Discovery Protocol
ETH_TYPE_PUP = 0x0200 # PUP protocol
ETH_TYPE_IP = 0x0800 # IP protocol
ETH_TYPE_ARP = 0x0806 # address resolution protocol
ETH_TYPE_AOE = 0x88a2 # AoE protocol
ETH_TYPE_CDP = 0x2000 # Cisco Discovery Protocol
ETH_TYPE_DTP = 0x2004 # Cisco Dynamic Trunking Protocol
ETH_TYPE_REVARP = 0x8035 # reverse addr resolution protocol
ETH_TYPE_8021Q = 0x8100 # IEEE 802.1Q VLAN tagging
ETH_TYPE_8021AD = 0x88a8 # IEEE 802.1ad
ETH_TYPE_QINQ1 = 0x9100 # Legacy QinQ
ETH_TYPE_QINQ2 = 0x9200 # Legacy QinQ
ETH_TYPE_IPX = 0x8137 # Internetwork Packet Exchange
ETH_TYPE_IP6 = 0x86DD # IPv6 protocol
ETH_TYPE_PPP = 0x880B # PPP
ETH_TYPE_MPLS = 0x8847 # MPLS
ETH_TYPE_MPLS_MCAST = 0x8848 # MPLS Multicast
ETH_TYPE_PPPoE_DISC = 0x8863 # PPP Over Ethernet Discovery Stage
ETH_TYPE_PPPoE = 0x8864 # PPP Over Ethernet Session Stage
ETH_TYPE_LLDP = 0x88CC # Link Layer Discovery Protocol
ETH_TYPE_TEB = 0x6558 # Transparent Ethernet Bridging
ETH_TYPE_PROFINET = 0x8892 # PROFINET protocol
# all QinQ types for fast checking
_ETH_TYPES_QINQ = frozenset([ETH_TYPE_8021Q, ETH_TYPE_8021AD, ETH_TYPE_QINQ1, ETH_TYPE_QINQ2])
class Ethernet(dpkt.Packet):
"""Ethernet.
Ethernet II, LLC (802.3+802.2), LLC/SNAP, and Novell raw 802.3,
with automatic 802.1q, MPLS, PPPoE, and Cisco ISL decapsulation.
Attributes:
__hdr__: Header fields of Ethernet.
dst: (bytes): Destination MAC address
src: (bytes): Source MAC address
type: (int): Ethernet frame type (Ethernet II, Novell raw IEEE 802.3, IEEE 802.2 LLC, IEEE 802.2 SNAP)
"""
__hdr__ = (
('dst', '6s', b''),
('src', '6s', b''),
('type', 'H', ETH_TYPE_IP)
)
_typesw = {}
_typesw_rev = {} # reverse mapping
__pprint_funcs__ = {
'dst': mac_to_str,
'src': mac_to_str,
}
def __init__(self, *args, **kwargs):
self._next_type = None
dpkt.Packet.__init__(self, *args, **kwargs)
# if data was given in kwargs, try to unpack it
if self.data:
if isstr(self.data) or isinstance(self.data, bytes):
self._unpack_data(self.data)
def _unpack_data(self, buf):
# unpack vlan tag and mpls label stacks
if self._next_type in _ETH_TYPES_QINQ:
self.vlan_tags = []
# support up to 2 tags (double tagging aka QinQ)
for _ in range(2):
tag = VLANtag8021Q(buf)
buf = buf[tag.__hdr_len__:]
self.vlan_tags.append(tag)
self._next_type = tag.type
if self._next_type != ETH_TYPE_8021Q:
break
# backward compatibility, use the 1st tag
self.vlanid, self.priority, self.cfi = self.vlan_tags[0].as_tuple()
elif self._next_type == ETH_TYPE_MPLS or self._next_type == ETH_TYPE_MPLS_MCAST:
self.labels = [] # old list containing labels as tuples
self.mpls_labels = [] # new list containing labels as instances of MPLSlabel
# XXX - max # of labels is undefined, just use 24
for i in range(24):
lbl = MPLSlabel(buf)
buf = buf[lbl.__hdr_len__:]
self.mpls_labels.append(lbl)
self.labels.append(lbl.as_tuple())
if lbl.s: # bottom of stack
break
# poor man's heuristic for guessing the next type
if compat_ord(buf[0]) == 0x45: # IP version 4 + header len 20 bytes
self._next_type = ETH_TYPE_IP
elif compat_ord(buf[0]) & 0xf0 == 0x60: # IP version 6
self._next_type = ETH_TYPE_IP6
# pseudowire Ethernet
elif len(buf) >= self.__hdr_len__:
if buf[:2] == b'\x00\x00': # looks like the control word (ECW)
buf = buf[4:] # skip the ECW
self._next_type = ETH_TYPE_TEB # re-use TEB class mapping to decode Ethernet
try:
eth_type = self._next_type or self.type
self.data = self._typesw[eth_type](buf)
setattr(self, self.data.__class__.__name__.lower(), self.data)
except (KeyError, dpkt.UnpackError):
self.data = buf
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
if self.type > 1500:
# Ethernet II
self._next_type = self.type
self._unpack_data(self.data)
elif (self.dst.startswith(b'\x01\x00\x0c\x00\x00') or
self.dst.startswith(b'\x03\x00\x0c\x00\x00')):
# Cisco ISL
tag = VLANtagISL(buf)
buf = buf[tag.__hdr_len__:]
self.vlan_tags = [tag]
self.vlan = tag.id # backward compatibility
self.unpack(buf)
elif self.data.startswith(b'\xff\xff'):
# Novell "raw" 802.3
self.type = ETH_TYPE_IPX
self.data = self.ipx = self._typesw[ETH_TYPE_IPX](self.data[2:])
elif self.type == ETH_TYPE_UNKNOWN:
# Unknown type, assume Ethernet
self._unpack_data(self.data)
else:
# IEEE 802.3 Ethernet - LLC
# try to unpack FCS, padding and trailer here
# we follow a heuristic approach similar to that of Wireshark
# size of eth body, not including the header
eth_len = self.len = self.type
# actual size of the remaining data, could include eth body, padding, fcs, trailer
data_len = len(self.data)
if data_len > eth_len:
# everything after eth body
tail = self.data[eth_len:]
# could be padding + fcs, possibly trailer
if len(tail) > 4:
# determine size of padding
if eth_len < 46: # 46=60-14; 14=size of eth hdr; all padded to 60 bytes
pad_len = 46 - eth_len
padding = tail[:pad_len]
# heuristic
if padding == pad_len * b'\x00': # padding is likely zeroes
self.padding = padding
tail = tail[pad_len:]
# else proceed to decode as fcs+trailer
# 4 bytes FCS and possible trailer
if len(tail) >= 4:
self.fcs = struct.unpack('>I', tail[:4])[0]
tail = tail[4:]
if tail:
self.trailer = tail
self.data = self.llc = llc.LLC(self.data[:eth_len])
def pack_hdr(self):
tags_buf = b''
new_type = self.type # replacement self.type when packing eth header
is_isl = False # ISL wraps Ethernet, this determines order of packing
if getattr(self, 'mpls_labels', None):
# mark all labels with s=0, last one with s=1
for lbl in self.mpls_labels:
lbl.s = 0
lbl.s = 1
# set encapsulation type
if new_type not in (ETH_TYPE_MPLS, ETH_TYPE_MPLS_MCAST):
new_type = ETH_TYPE_MPLS
tags_buf = b''.join(lbl.pack_hdr() for lbl in self.mpls_labels)
elif getattr(self, 'vlan_tags', None):
# set last tag type to next layer pointed by self.data
last_tag_type = self.type # default
if isinstance(self.data, dpkt.Packet):
last_tag_type = self._typesw_rev.get(self.data.__class__, self.type)
# set encapsulation types
t1 = self.vlan_tags[0]
if len(self.vlan_tags) == 1:
if isinstance(t1, VLANtag8021Q):
if new_type not in _ETH_TYPES_QINQ: # preserve the type if already set
new_type = ETH_TYPE_8021Q
t1.type = last_tag_type
elif isinstance(t1, VLANtagISL):
t1.type = 0 # 0 means Ethernet
is_isl = True
elif len(self.vlan_tags) == 2:
t2 = self.vlan_tags[1]
if isinstance(t1, VLANtag8021Q) and isinstance(t2, VLANtag8021Q):
t1.type = ETH_TYPE_8021Q
if new_type not in _ETH_TYPES_QINQ:
new_type = ETH_TYPE_8021AD
t2.type = last_tag_type
else:
raise dpkt.PackError('maximum is 2 VLAN tags per Ethernet frame')
tags_buf = b''.join(tag.pack_hdr() for tag in self.vlan_tags)
# initial type is based on next layer, pointed by self.data;
# try to find an ETH_TYPE matching the data class
elif isinstance(self.data, dpkt.Packet):
new_type = self._typesw_rev.get(self.data.__class__, new_type)
# if self.data is LLC then this is IEEE 802.3 Ethernet and self.type
# then actually encodes the length of data
if isinstance(self.data, llc.LLC):
new_type = len(self.data)
hdr_buf = dpkt.Packet.pack_hdr(self)[:-2] + struct.pack('>H', new_type)
if not is_isl:
return hdr_buf + tags_buf
else:
return tags_buf + hdr_buf
def __bytes__(self):
tail = b''
if isinstance(self.data, llc.LLC):
fcs = b''
if hasattr(self, 'fcs'):
if self.fcs:
fcs = self.fcs
else:
# if fcs field is present but 0/None, then compute it and add to the tail
fcs_buf = self.pack_hdr() + bytes(self.data)
# if ISL header is present, exclude it from the calculation
if getattr(self, 'vlan_tags', None):
if isinstance(self.vlan_tags[0], VLANtagISL):
fcs_buf = fcs_buf[VLANtagISL.__hdr_len__:]
fcs_buf += getattr(self, 'padding', b'')
revcrc = crc32(fcs_buf) & 0xffffffff
fcs = struct.unpack('<I', struct.pack('>I', revcrc))[0] # bswap32
fcs = struct.pack('>I', fcs)
tail = getattr(self, 'padding', b'') + fcs + getattr(self, 'trailer', b'')
return bytes(dpkt.Packet.__bytes__(self) + tail)
def __len__(self):
tags = getattr(self, 'mpls_labels', []) + getattr(self, 'vlan_tags', [])
_len = dpkt.Packet.__len__(self) + sum(t.__hdr_len__ for t in tags)
if isinstance(self.data, llc.LLC):
_len += len(getattr(self, 'padding', b''))
if hasattr(self, 'fcs'):
_len += 4
_len += len(getattr(self, 'trailer', b''))
return _len
@classmethod
def set_type(cls, t, pktclass):
cls._typesw[t] = pktclass
cls._typesw_rev[pktclass] = t
@classmethod
def get_type(cls, t):
return cls._typesw[t]
@classmethod
def get_type_rev(cls, k):
return cls._typesw_rev[k]
# XXX - auto-load Ethernet dispatch table from ETH_TYPE_* definitions
def __load_types():
g = globals()
for k, v in iteritems(g):
if k.startswith('ETH_TYPE_'):
name = k[9:]
modname = name.lower()
try:
mod = __import__(modname, g, level=1)
Ethernet.set_type(v, getattr(mod, name))
except (ImportError, AttributeError):
continue
# add any special cases below
Ethernet.set_type(ETH_TYPE_TEB, Ethernet)
def _mod_init():
"""Post-initialization called when all dpkt modules are fully loaded"""
if not Ethernet._typesw:
__load_types()
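# A minimal sketch (not part of dpkt) of extending the dispatch table above at
# runtime. The EtherType value 0x88b5 (IEEE local experimental) and the
# HypotheticalProto class are assumptions chosen for illustration only.
def _example_register_custom_ethertype():
    class HypotheticalProto(dpkt.Packet):
        __hdr__ = (('tag', 'H', 0),)
    # register the mapping in both directions so that unpacking and packing work
    Ethernet.set_type(0x88b5, HypotheticalProto)
    assert Ethernet.get_type(0x88b5) is HypotheticalProto
    assert Ethernet.get_type_rev(HypotheticalProto) == 0x88b5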
# Misc protocols
class MPLSlabel(dpkt.Packet):
"""A single entry in MPLS label stack"""
__hdr__ = (
('_val_exp_s_ttl', 'I', 0),
)
# field names are according to RFC3032
__bit_fields__ = {
'_val_exp_s_ttl': (
('val', 20), # label value, 20 bits
('exp', 3), # experimental use, 3 bits
('s', 1), # bottom of stack flag, 1 bit
('ttl', 8), # time to live, 8 bits
)
}
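    # dpkt's Packet machinery turns each __bit_fields__ entry into a property
    # (self.val, self.exp, self.s, self.ttl) backed by the raw _val_exp_s_ttl field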
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.data = b''
def as_tuple(self): # backward-compatible representation
return (self.val, self.exp, self.ttl)
class VLANtag8021Q(dpkt.Packet):
"""IEEE 802.1q VLAN tag"""
__hdr__ = (
('_pri_cfi_id', 'H', 0),
('type', 'H', ETH_TYPE_IP)
)
__bit_fields__ = {
'_pri_cfi_id': (
('pri', 3), # priority, 3 bits
('cfi', 1), # canonical format indicator, 1 bit
('id', 12), # VLAN id, 12 bits
)
}
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.data = b''
def as_tuple(self):
return (self.id, self.pri, self.cfi)
class VLANtagISL(dpkt.Packet):
"""Cisco Inter-Switch Link VLAN tag"""
__hdr__ = (
('da', '5s', b'\x01\x00\x0c\x00\x00'),
('_type_pri', 'B', 3),
('sa', '6s', b''),
('len', 'H', 0),
('snap', '3s', b'\xaa\xaa\x03'),
('hsa', '3s', b'\x00\x00\x0c'),
('_id_bpdu', 'H', 0),
('indx', 'H', 0),
('res', 'H', 0)
)
__bit_fields__ = {
'_type_pri': (
('type', 4), # encapsulation type, 4 bits; 0 means Ethernet
            ('pri', 4), # user-defined bits; only the 2 low bits are used (priority)
),
'_id_bpdu': (
('id', 15), # vlan id, 15 bits
('bpdu', 1), # bridge protocol data unit indicator
)
}
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.data = b''
# Unit tests
def test_eth():
from . import ip6
from . import tcp
s = (b'\x00\xb0\xd0\xe1\x80\x72\x00\x11\x24\x8c\x11\xde\x86\xdd\x60\x00\x00\x00'
b'\x00\x28\x06\x40\xfe\x80\x00\x00\x00\x00\x00\x00\x02\x11\x24\xff\xfe\x8c'
b'\x11\xde\xfe\x80\x00\x00\x00\x00\x00\x00\x02\xb0\xd0\xff\xfe\xe1\x80\x72'
b'\xcd\xd3\x00\x16\xff\x50\xd7\x13\x00\x00\x00\x00\xa0\x02\xff\xff\x67\xd3'
b'\x00\x00\x02\x04\x05\xa0\x01\x03\x03\x00\x01\x01\x08\x0a\x7d\x18\x3a\x61'
b'\x00\x00\x00\x00')
eth = Ethernet(s)
assert eth
assert isinstance(eth.data, ip6.IP6)
assert isinstance(eth.data.data, tcp.TCP)
assert str(eth) == str(s)
assert len(eth) == len(s)
def test_eth_zero_ethtype():
s = (b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x08\x89\x12\x04')
eth = Ethernet(s)
assert eth
assert eth.type == ETH_TYPE_UNKNOWN
assert str(eth) == str(s)
assert len(eth) == len(s)
def test_eth_init_with_data():
# initialize with a data string, test that it gets unpacked
from . import arp
eth1 = Ethernet(
dst=b'PQRSTU', src=b'ABCDEF', type=ETH_TYPE_ARP,
data=b'\x00\x01\x08\x00\x06\x04\x00\x01123456abcd7890abwxyz')
assert isinstance(eth1.data, arp.ARP)
# now initialize with a class, test packing
eth2 = Ethernet(
dst=b'PQRSTU', src=b'ABCDEF',
data=arp.ARP(sha=b'123456', spa=b'abcd', tha=b'7890ab', tpa=b'wxyz'))
assert str(eth1) == str(eth2)
assert len(eth1) == len(eth2)
def test_mpls_label():
s = b'\x00\x01\x0b\xff'
m = MPLSlabel(s)
assert m.val == 16
assert m.exp == 5
assert m.s == 1
assert m.ttl == 255
assert str(m) == str(s)
assert len(m) == len(s)
def test_802dot1q_tag():
s = b'\xa0\x76\x01\x65'
t = VLANtag8021Q(s)
assert t.pri == 5
assert t.cfi == 0
assert t.id == 118
assert str(t) == str(s)
t.cfi = 1
assert str(t) == str(b'\xb0\x76\x01\x65')
assert len(t) == len(s)
def test_isl_tag():
s = (b'\x01\x00\x0c\x00\x00\x03\x00\x02\xfd\x2c\xb8\x97\x00\x00\xaa\xaa\x03\x00\x00\x00\x04\x57'
b'\x00\x00\x00\x00')
t = VLANtagISL(s)
assert t.pri == 3
assert t.id == 555
assert t.bpdu == 1
assert str(t) == str(s)
assert len(t) == len(s)
def test_eth_802dot1q():
from . import ip
s = (b'\x00\x60\x08\x9f\xb1\xf3\x00\x40\x05\x40\xef\x24\x81\x00\x90\x20\x08'
b'\x00\x45\x00\x00\x34\x3b\x64\x40\x00\x40\x06\xb7\x9b\x83\x97\x20\x81'
b'\x83\x97\x20\x15\x04\x95\x17\x70\x51\xd4\xee\x9c\x51\xa5\x5b\x36\x80'
b'\x10\x7c\x70\x12\xc7\x00\x00\x01\x01\x08\x0a\x00\x04\xf0\xd4\x01\x99'
b'\xa3\xfd')
eth = Ethernet(s)
assert eth.cfi == 1
assert eth.vlanid == 32
assert eth.priority == 4
assert len(eth.vlan_tags) == 1
assert eth.vlan_tags[0].type == ETH_TYPE_IP
assert isinstance(eth.data, ip.IP)
# construction
assert str(eth) == str(s), 'pack 1'
assert str(eth) == str(s), 'pack 2'
assert len(eth) == len(s)
# construction with kwargs
eth2 = Ethernet(src=eth.src, dst=eth.dst, vlan_tags=eth.vlan_tags, data=eth.data)
assert str(eth2) == str(s)
# construction w/o the tag
del eth.vlan_tags, eth.cfi, eth.vlanid, eth.priority
assert str(eth) == str(s[:12] + b'\x08\x00' + s[18:])
def test_eth_802dot1q_stacked(): # 2 VLAN tags
from binascii import unhexlify
import pytest
from . import ip
s = unhexlify(
'001bd41ba4d80013c3dfae18810000768100000a0800'
'45000064000f0000ff01929b0a760a010a760a020800'
'ceb70003000000000000001faf70abcdabcdabcdabcd'
'abcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcd'
'abcdabcdabcdabcdabcdabcdabcdabcdabcdabcdabcd'
'abcdabcdabcdabcdabcdabcd'
)
eth = Ethernet(s)
assert eth.type == ETH_TYPE_8021Q
assert len(eth.vlan_tags) == 2
assert eth.vlan_tags[0].id == 118
assert eth.vlan_tags[1].id == 10
assert eth.vlan_tags[0].type == ETH_TYPE_8021Q
assert eth.vlan_tags[1].type == ETH_TYPE_IP
assert [t.as_tuple() for t in eth.vlan_tags] == [(118, 0, 0), (10, 0, 0)]
assert isinstance(eth.data, ip.IP)
# construction
assert len(eth) == len(s)
assert bytes(eth) == s
# test packing failure with too many tags
    eth.vlan_tags += eth.vlan_tags[:1]  # just duplicate the first tag
with pytest.raises(dpkt.PackError, match='maximum is 2 VLAN tags per Ethernet frame'):
bytes(eth)
# construction with kwargs
eth2 = Ethernet(src=eth.src, dst=eth.dst, vlan_tags=eth.vlan_tags[:2], data=eth.data)
# construction sets ip.type to 802.1ad instead of 802.1q so account for it
assert str(eth2) == str(s[:12] + b'\x88\xa8' + s[14:])
# construction w/o the tags
del eth.vlan_tags, eth.cfi, eth.vlanid, eth.priority
assert str(eth) == str(s[:12] + b'\x08\x00' + s[22:])
def test_eth_vlan_arp():
from . import arp
# 2 VLAN tags + ARP
s = (b'\xff\xff\xff\xff\xff\xff\xca\x03\x0d\xb4\x00\x1c\x81\x00\x00\x64\x81\x00\x00\xc8\x08\x06'
b'\x00\x01\x08\x00\x06\x04\x00\x01\xca\x03\x0d\xb4\x00\x1c\xc0\xa8\x02\xc8\x00\x00\x00\x00'
b'\x00\x00\xc0\xa8\x02\xfe\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00')
eth = Ethernet(s)
assert len(eth.vlan_tags) == 2
assert eth.vlan_tags[0].type == ETH_TYPE_8021Q
assert eth.vlan_tags[1].type == ETH_TYPE_ARP
assert isinstance(eth.data, arp.ARP)
def test_eth_mpls_stacked(): # Eth - MPLS - MPLS - IP - ICMP
from . import ip
from . import icmp
s = (b'\x00\x30\x96\xe6\xfc\x39\x00\x30\x96\x05\x28\x38\x88\x47\x00\x01\x20\xff\x00\x01\x01\xff'
b'\x45\x00\x00\x64\x00\x50\x00\x00\xff\x01\xa7\x06\x0a\x1f\x00\x01\x0a\x22\x00\x01\x08\x00'
b'\xbd\x11\x0f\x65\x12\xa0\x00\x00\x00\x00\x00\x53\x9e\xe0' + b'\xab\xcd' * 32)
eth = Ethernet(s)
assert len(eth.mpls_labels) == 2
assert eth.mpls_labels[0].val == 18
assert eth.mpls_labels[1].val == 16
assert eth.labels == [(18, 0, 255), (16, 0, 255)]
assert isinstance(eth.data, ip.IP)
assert isinstance(eth.data.data, icmp.ICMP)
# exercise .pprint() for the coverage tests
eth.pprint()
# construction
assert str(eth) == str(s), 'pack 1'
assert str(eth) == str(s), 'pack 2'
assert len(eth) == len(s)
# construction with kwargs
eth2 = Ethernet(src=eth.src, dst=eth.dst, mpls_labels=eth.mpls_labels, data=eth.data)
assert str(eth2) == str(s)
# construction w/o labels
del eth.labels, eth.mpls_labels
assert str(eth) == str(s[:12] + b'\x08\x00' + s[22:])
def test_eth_mpls_ipv6(): # Eth - MPLS - IP6 - TCP
from . import ip6
from . import tcp
s = ( b'\x00\x30\x96\xe6\xfc\x39\x00\x30\x96\x05\x28\x38\x88\x47\x00\x01'
b'\x01\xff\x62\x8c\xed\x7b\x00\x28\x06\xfd\x22\x22\x22\x22\x03\x3f'
b'\x53\xd3\x48\xfb\x8b\x5a\x41\x7f\xe6\x17\x11\x11\x11\x11\x40\x0b'
b'\x08\x09\x00\x00\x00\x00\x00\x00\x20\x0e\xa1\x8e\x01\xbb\xd6\xde'
b'\x73\x17\x00\x00\x00\x00\xa0\x02\xff\xff\x58\x7f\x00\x00\x02\x04'
b'\x05\x8c\x04\x02\x08\x0a\x69\x23\xe8\x63\x00\x00\x00\x00\x01\x03'
b'\x03\x0a\xaf\x9c\xb6\x93')
eth = Ethernet(s)
assert len(eth.mpls_labels) == 1
assert eth.mpls_labels[0].val == 16
assert eth.labels == [(16, 0, 255)]
assert isinstance(eth.data, ip6.IP6)
assert isinstance(eth.data.data, tcp.TCP)
def test_isl_eth_llc_stp(): # ISL - 802.3 Ethernet(w/FCS) - LLC - STP
from . import stp
s = (b'\x01\x00\x0c\x00\x00\x03\x00\x02\xfd\x2c\xb8\x97\x00\x00\xaa\xaa\x03\x00\x00\x00\x02\x9b'
b'\x00\x00\x00\x00\x01\x80\xc2\x00\x00\x00\x00\x02\xfd\x2c\xb8\x98\x00\x26\x42\x42\x03\x00'
b'\x00\x00\x00\x00\x80\x00\x00\x02\xfd\x2c\xb8\x83\x00\x00\x00\x00\x80\x00\x00\x02\xfd\x2c'
b'\xb8\x83\x80\x26\x00\x00\x14\x00\x02\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x41\xc6'
b'\x75\xd6')
eth = Ethernet(s)
assert eth.vlan == 333
assert len(eth.vlan_tags) == 1
assert eth.vlan_tags[0].id == 333
assert eth.vlan_tags[0].pri == 3
# check that FCS and padding were decoded
assert eth.fcs == 0x41c675d6
assert eth.padding == b'\x00' * 8
# stack
assert isinstance(eth.data, llc.LLC)
assert isinstance(eth.data.data, stp.STP)
# construction
assert str(eth) == str(s), 'pack 1'
assert str(eth) == str(s), 'pack 2'
assert len(eth) == len(s)
# construction with kwargs
eth2 = Ethernet(src=eth.src, dst=eth.dst, vlan_tags=eth.vlan_tags, data=eth.data)
eth2.padding = b'\x00' * 8
# test FCS computation
eth2.fcs = None
assert str(eth2) == str(s)
# TODO: test padding construction
# eth2.padding = None
# assert str(eth2) == str(s)
# construction w/o the ISL tag
del eth.vlan_tags, eth.vlan
assert str(eth) == str(s[26:])
def test_eth_llc_snap_cdp(): # 802.3 Ethernet - LLC/SNAP - CDP
from . import cdp
s = (b'\x01\x00\x0c\xcc\xcc\xcc\xc4\x022k\x00\x00\x01T\xaa\xaa\x03\x00\x00\x0c \x00\x02\xb4,B'
b'\x00\x01\x00\x06R2\x00\x05\x00\xffCisco IOS Software, 3700 Software (C3745-ADVENTERPRI'
b'SEK9_SNA-M), Version 12.4(25d), RELEASE SOFTWARE (fc1)\nTechnical Support: http://www.'
b'cisco.com/techsupport\nCopyright (c) 1986-2010 by Cisco Systems, Inc.\nCompiled Wed 18'
b'-Aug-10 08:18 by prod_rel_team\x00\x06\x00\x0eCisco 3745\x00\x02\x00\x11\x00\x00\x00\x01'
b'\x01\x01\xcc\x00\x04\n\x00\x00\x02\x00\x03\x00\x13FastEthernet0/0\x00\x04\x00\x08\x00'
b'\x00\x00)\x00\t\x00\x04\x00\x0b\x00\x05\x00')
eth = Ethernet(s)
# stack
assert isinstance(eth.data, llc.LLC)
assert isinstance(eth.data.data, cdp.CDP)
assert len(eth.data.data.tlvs) == 8 # number of CDP TLVs; ensures they are decoded
assert str(eth) == str(s), 'pack 1'
assert str(eth) == str(s), 'pack 2'
assert len(eth) == len(s)
def test_eth_llc_ipx(): # 802.3 Ethernet - LLC - IPX
from . import ipx
s = (b'\xff\xff\xff\xff\xff\xff\x00\xb0\xd0\x22\xf7\xf3\x00\x54\xe0\xe0\x03\xff\xff\x00\x50\x00'
b'\x14\x00\x00\x00\x00\xff\xff\xff\xff\xff\xff\x04\x55\x00\x00\x00\x00\x00\xb0\xd0\x22\xf7'
b'\xf3\x04\x55\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x01\x02\x5f\x5f\x4d\x53\x42'
b'\x52\x4f\x57\x53\x45\x5f\x5f\x02\x01\x00')
eth = Ethernet(s)
# stack
assert isinstance(eth.data, llc.LLC)
assert isinstance(eth.data.data, ipx.IPX)
assert eth.data.data.pt == 0x14
assert str(eth) == str(s), 'pack 1'
assert str(eth) == str(s), 'pack 2'
assert len(eth) == len(s)
def test_eth_pppoe(): # Eth - PPPoE - IPv6 - UDP - DHCP6
from . import ip6
from . import ppp
from . import pppoe
from . import udp
s = (b'\xca\x01\x0e\x88\x00\x06\xcc\x05\x0e\x88\x00\x00\x88\x64\x11\x00\x00\x11\x00\x64\x57\x6e'
b'\x00\x00\x00\x00\x3a\x11\xff\xfe\x80\x00\x00\x00\x00\x00\x00\xce\x05\x0e\xff\xfe\x88\x00'
b'\x00\xff\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x02\x02\x22\x02\x23\x00'
b'\x3a\x1a\x67\x01\xfc\x24\xab\x00\x08\x00\x02\x05\xe9\x00\x01\x00\x0a\x00\x03\x00\x01\xcc'
b'\x05\x0e\x88\x00\x00\x00\x06\x00\x06\x00\x19\x00\x17\x00\x18\x00\x19\x00\x0c\x00\x09\x00'
b'\x01\x00\x00\x00\x00\x00\x00\x00\x00')
eth = Ethernet(s)
# stack
assert isinstance(eth.data, pppoe.PPPoE)
assert isinstance(eth.data.data, ppp.PPP)
assert isinstance(eth.data.data.data, ip6.IP6)
assert isinstance(eth.data.data.data.data, udp.UDP)
# construction
assert str(eth) == str(s)
assert len(eth) == len(s)
def test_eth_2mpls_ecw_eth_llc_stp(): # Eth - MPLS - MPLS - PW ECW - 802.3 Eth(no FCS) - LLC - STP
from . import stp
s = (b'\xcc\x01\x0d\x5c\x00\x10\xcc\x00\x0d\x5c\x00\x10\x88\x47\x00\x01\x20\xfe\x00\x01\x01\xff'
b'\x00\x00\x00\x00\x01\x80\xc2\x00\x00\x00\xcc\x04\x0d\x5c\xf0\x00\x00\x26\x42\x42\x03\x00'
b'\x00\x00\x00\x00\x80\x00\xcc\x04\x0d\x5c\x00\x00\x00\x00\x00\x00\x80\x00\xcc\x04\x0d\x5c'
b'\x00\x00\x80\x01\x00\x00\x14\x00\x02\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00')
eth = Ethernet(s)
assert len(eth.mpls_labels) == 2
assert eth.mpls_labels[0].val == 18
assert eth.mpls_labels[1].val == 16
# stack
eth2 = eth.data
assert isinstance(eth2, Ethernet)
assert eth2.len == 38 # 802.3 Ethernet
# no FCS, no trailer, just 8 bytes of padding (60=38+14+8)
assert not hasattr(eth2, 'fcs')
assert eth2.padding == b'\x00' * 8
assert isinstance(eth2.data, llc.LLC)
assert isinstance(eth2.data.data, stp.STP)
assert eth2.data.data.port_id == 0x8001
# construction
# XXX - FIXME: make packing account for the ECW
# assert str(eth) == str(s)
# QinQ: Eth - 802.1ad - 802.1Q - IP
def test_eth_802dot1ad_802dot1q_ip():
from . import ip
s = (b'\x00\x10\x94\x00\x00\x0c\x00\x10\x94\x00\x00\x14\x88\xa8\x00\x1e\x81\x00\x00\x64\x08\x00'
b'\x45\x00\x05\xc2\x54\xb0\x00\x00\xff\xfd\xdd\xbf\xc0\x55\x01\x16\xc0\x55\x01\x0e' +
1434 * b'\x00' + b'\x4f\xdc\xcd\x64\x20\x8d\xb6\x4e\xa8\x45\xf8\x80\xdd\x0c\xf9\x72\xc4'
b'\xd0\xcf\xcb\x46\x6d\x62\x7a')
eth = Ethernet(s)
assert eth.type == ETH_TYPE_8021AD
assert eth.vlan_tags[0].id == 30
assert eth.vlan_tags[1].id == 100
assert isinstance(eth.data, ip.IP)
e1 = Ethernet(s[:-1458]) # strip IP data
# construction
e2 = Ethernet(
dst=b'\x00\x10\x94\x00\x00\x0c', src=b'\x00\x10\x94\x00\x00\x14',
type=ETH_TYPE_8021AD,
vlan_tags=[
VLANtag8021Q(pri=0, id=30, cfi=0),
VLANtag8021Q(pri=0, id=100, cfi=0)
],
data=ip.IP(
len=1474, id=21680, ttl=255, p=253, sum=56767,
src=b'\xc0U\x01\x16', dst=b'\xc0U\x01\x0e', opts=b''
)
)
assert str(e1) == str(e2)
def test_eth_pack():
eth = Ethernet(data=b'12345')
assert str(eth)
def test_eth_802dot1q_with_unfamiliar_data():
profinet_data = (
b'\xfe\xff\x05\x01\x05\x01\x00\x02\x00\x00\x00\x6c\x02'
b'\x05\x00\x12\x00\x00\x02\x01\x02\x02\x02\x03\x02\x04\x02\x05\x02'
b'\x06\x01\x01\x01\x02\x02\x01\x00\x08\x00\x00\x53\x37\x2d\x33\x30'
b'\x30\x02\x02\x00\x22\x00\x00\x70\x6c\x63\x78\x62\x33\x30\x30\x78'
b'\x6b\x63\x70\x75\x78\x61\x33\x31\x37\x2d\x32\x78\x61\x70\x6e\x78'
b'\x72\x64\x70\x32\x32\x63\x66\x02\x03\x00\x06\x00\x00\x00\x2a\x01'
b'\x01\x02\x04\x00\x04\x00\x00\x02\x00\x01\x02\x00\x0e\x00\x01\xc0'
b'\xa8\x3c\x87\xff\xff\xff\x00\xc0\xa8\x3c\x87')
s = (b'\x00\x0c\x29\x65\x1c\x29\x00\x0e\x8c\x8a\xa2\x5e\x81\x00\x00\x00'
b'\x88\x92' + profinet_data)
eth = Ethernet(s)
assert eth.type == ETH_TYPE_8021Q
assert len(eth.vlan_tags) == 1
assert eth.vlan_tags[0].type == ETH_TYPE_PROFINET
assert isinstance(eth.data, bytes)
assert eth.data == profinet_data
def test_eth_802dot1q_with_arp_data(): # https://github.com/kbandla/dpkt/issues/460
from .arp import ARP
e = Ethernet(src=b'foobar', dst=b'\xff' * 6)
v = VLANtag8021Q(pri=0, cfi=0, id=1)
e.vlan_tags = [v]
a = ARP(sha=b'foobar', spa=b'\x0a\x0a\x0a\x0a',
tha=b'', tpa=b'\x0a\x0a\x0a\x05')
e.data = a
assert bytes(e) == (
b'\xff\xff\xff\xff\xff\xfffoobar\x81\x00\x00\x01\x08\x06' # 0x0806 = next layer is ARP
b'\x00\x01\x08\x00\x06\x04\x00\x01foobar\x0a\x0a\x0a\x0a'
b'\x00\x00\x00\x00\x00\x00\x0a\x0a\x0a\x05')
# 802.3 Ethernet - LLC/STP - Padding - FCS - Metamako trailer
def test_eth_8023_llc_trailer(): # https://github.com/kbandla/dpkt/issues/438
d = (b'\x01\x80\xc2\x00\x00\x00\x78\x0c\xf0\xb4\xd8\x91\x00\x27\x42\x42\x03\x00\x00\x02\x02\x3c'
b'\x00\x01\x2c\x33\x11\xf2\x39\xc1\x00\x00\x00\x02\x80\x01\x78\x0c\xf0\xb4\xd8\xbc\x80\xaa'
b'\x01\x00\x14\x00\x02\x00\x0f\x00\x00\x00\x00\x00\x00\x00\x00\x00\x4d\xb9\x81\x20\x5c\x1e'
b'\x5f\xba\x3a\xa5\x47\xfa\x01\x8e\x52\x03')
eth = Ethernet(d)
assert eth.len == 39
assert eth.padding == b'\x00\x00\x00\x00\x00\x00\x00'
assert eth.fcs == 0x4db98120
assert eth.trailer == b'\x5c\x1e\x5f\xba\x3a\xa5\x47\xfa\x01\x8e\x52\x03'
assert isinstance(eth.data, llc.LLC)
# packing
assert bytes(eth) == d
# FCS computation
eth.fcs = None
assert bytes(eth) == d
def test_eth_novell():
from binascii import unhexlify
import dpkt
buf = unhexlify(
'010203040506' # dst
'0708090a0b0c' # src
'0000' # type (ignored)
'ffff' # indicates Novell
# IPX packet
'0000' # sum
'0001' # len
'02' # tc
'03' # pt
'0102030405060708090a0b0c' # dst
'0102030405060708090a0b0c' # src
)
eth = Ethernet(buf)
assert isinstance(eth.data, dpkt.ipx.IPX)
assert eth.data.tc == 2
assert eth.data.data == b''
| 31,940 | 35.462329 | 114 |
py
|
dpkt
|
dpkt-master/dpkt/cdp.py
|
# $Id: cdp.py 23 2006-11-08 15:45:33Z dugsong $
# -*- coding: utf-8 -*-
"""Cisco Discovery Protocol."""
from __future__ import absolute_import
from . import dpkt
CDP_DEVID = 1 # string
CDP_ADDRESS = 2
CDP_PORTID = 3 # string
CDP_CAPABILITIES = 4 # 32-bit bitmask
CDP_VERSION = 5 # string
CDP_PLATFORM = 6 # string
CDP_IPPREFIX = 7
CDP_VTP_MGMT_DOMAIN = 9 # string
CDP_NATIVE_VLAN = 10 # 16-bit integer
CDP_DUPLEX = 11 # 8-bit boolean
CDP_TRUST_BITMAP = 18 # 8-bit bitmask
CDP_UNTRUST_COS = 19 # 8-bit port
CDP_SYSTEM_NAME = 20 # string
CDP_SYSTEM_OID = 21 # 10-byte binary string
CDP_MGMT_ADDRESS = 22 # 32-bit number of addrs, Addresses
CDP_LOCATION = 23 # string
class CDP(dpkt.Packet):
"""Cisco Discovery Protocol.
Cisco Discovery Protocol (CDP) is a proprietary Data Link Layer protocol developed by Cisco Systems in 1994
by Keith McCloghrie and Dino Farinacci. It is used to share information about other directly connected
Cisco equipment, such as the operating system version and IP address.
See more on
https://en.wikipedia.org/wiki/Cisco_Discovery_Protocol
Attributes:
__hdr__: Header fields of CDP.
version: (int): CDP protocol version. (1 byte)
ttl: (int): Time to live. The amount of time in seconds that a receiver should retain the information
contained in this packet. (1 byte)
sum: (int): Checksum. (2 bytes)
"""
__hdr__ = (
('version', 'B', 2),
('ttl', 'B', 180),
('sum', 'H', 0)
)
class TLV(dpkt.Packet):
"""Type–length–value
When constructing the packet, len is not mandatory:
if not provided, then self.data must be this exact TLV payload
Attributes:
__hdr__: Header fields of TLV.
type: (int): Type (2 bytes)
len: (int): The total length in bytes of the Type, Length and Data fields. (2 bytes)
"""
__hdr__ = (
('type', 'H', 0),
('len', 'H', 0)
)
def data_len(self):
if self.len:
return self.len - self.__hdr_len__
return len(self.data)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.data = self.data[:self.data_len()]
def __len__(self):
return self.__hdr_len__ + len(self.data)
def __bytes__(self):
if hasattr(self, 'len') and not self.len:
self.len = len(self)
return self.pack_hdr() + bytes(self.data)
class Address(TLV):
# XXX - only handle NLPID/IP for now
__hdr__ = (
('ptype', 'B', 1), # protocol type (NLPID)
('plen', 'B', 1), # protocol length
('p', 'B', 0xcc), # IP
('alen', 'H', 4) # address length
)
def data_len(self):
return self.alen
class TLV_Addresses(TLV):
__hdr__ = (
('type', 'H', CDP_ADDRESS),
('len', 'H', 0), # 17),
('Addresses', 'L', 1),
)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
buf = self.data
l_ = []
while buf:
# find the right TLV according to Type value
tlv_find_type = self.TLV(buf).type
# if this TLV is not in tlv_types, use the default TLV class
tlv = self.tlv_types.get(tlv_find_type, self.TLV)(buf)
l_.append(bytes(tlv))
buf = buf[len(tlv):]
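        # note: the re-serialized TLVs are kept as bytes (not TLV objects) and
        # joined back into self.data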
self.tlvs = l_
self.data = b''.join(l_)
def __len__(self):
return self.__hdr_len__ + len(self.data)
def __bytes__(self):
data = bytes(self.data)
if not self.sum:
self.sum = dpkt.in_cksum(self.pack_hdr() + data)
return self.pack_hdr() + data
    # keep here the TLV classes whose header differs from the generic TLV header (for example, TLV_Addresses)
tlv_types = {CDP_ADDRESS: TLV_Addresses}
def test_cdp():
import socket
from . import ethernet
ss = (b'\x02\xb4\xdf\x93\x00\x01\x00\x09\x63\x69\x73\x63\x6f\x00\x02\x00\x11\x00\x00\x00\x01'
b'\x01\x01\xcc\x00\x04\xc0\xa8\x01\x67')
rr1 = CDP(ss)
assert bytes(rr1) == ss
# construction
ss = (b'\x02\xb4\xdf\x93\x00\x01\x00\x09\x63\x69\x73\x63\x6f\x00\x02\x00\x11\x00\x00\x00\x01'
b'\x01\x01\xcc\x00\x04\xc0\xa8\x01\x67')
p1 = CDP.TLV_Addresses(data=CDP.Address(data=socket.inet_aton('192.168.1.103')))
p2 = CDP.TLV(type=CDP_DEVID, data=b'cisco')
data = p2.pack() + p1.pack()
rr2 = CDP(data=data)
assert bytes(rr2) == ss
s = (b'\x01\x00\x0c\xcc\xcc\xcc\xc4\x022k\x00\x00\x01T\xaa\xaa\x03\x00\x00\x0c \x00\x02\xb4,B'
b'\x00\x01\x00\x06R2\x00\x05\x00\xffCisco IOS Software, 3700 Software (C3745-ADVENTERPRI'
b'SEK9_SNA-M), Version 12.4(25d), RELEASE SOFTWARE (fc1)\nTechnical Support: http://www.'
b'cisco.com/techsupport\nCopyright (c) 1986-2010 by Cisco Systems, Inc.\nCompiled Wed 18'
b'-Aug-10 08:18 by prod_rel_team\x00\x06\x00\x0eCisco 3745\x00\x02\x00\x11\x00\x00\x00\x01'
b'\x01\x01\xcc\x00\x04\n\x00\x00\x02\x00\x03\x00\x13FastEthernet0/0\x00\x04\x00\x08\x00'
b'\x00\x00)\x00\t\x00\x04\x00\x0b\x00\x05\x00')
eth = ethernet.Ethernet(s)
assert isinstance(eth.data.data, CDP)
assert len(eth.data.data.tlvs) == 8 # number of CDP TLVs; ensures they are decoded
assert str(eth) == str(s)
assert len(eth) == len(s)
def test_tlv():
from binascii import unhexlify
# len field set to 0
buf_no_len = unhexlify(
'0000' # type
'0000' # len
'abcd' # data
)
buf_with_len = unhexlify(
'0000' # type
'0006' # len
'abcd' # data
)
tlv = CDP.TLV(buf_no_len)
assert tlv.type == 0
assert tlv.len == 0
assert tlv.data_len() == 2
assert tlv.data == b'\xab\xcd'
assert bytes(tlv) == buf_with_len
# len field set manually
tlv = CDP.TLV(buf_with_len)
assert tlv.type == 0
assert tlv.len == 6
assert tlv.data_len() == 2
assert tlv.data == b'\xab\xcd'
assert bytes(tlv) == buf_with_len
def test_address():
from binascii import unhexlify
buf = unhexlify(
'00' # ptype
'11' # plen
'22' # p
'3333' # alen
)
address = CDP.Address(buf)
assert address.data_len() == 0x3333
| 6,446 | 30.758621 | 113 |
py
|
dpkt
|
dpkt-master/dpkt/ssl_ciphersuites.py
|
# Copyright 2012 Google Inc. All rights reserved.
# -*- coding: utf-8 -*-
"""
Nicely formatted cipher suite definitions for TLS
A list of cipher suites in the form of CipherSuite objects.
These are supposed to be immutable; don't mess with them.
"""
class CipherSuite(object):
"""
Encapsulates a cipher suite.
Members/args:
* code: two-byte ID code, as int
* kx: key exchange algorithm, e.g. 'RSA' or 'DHE'
* auth: authentication algorithm, e.g. 'RSA' or 'DSS'
* cipher: stream or block cipher algorithm, e.g. 'AES_128'
* mode: mode of operation for block ciphers, e.g. 'CBC' or 'GCM'
* mac: message authentication code algorithm, e.g. 'MD5' or 'SHA256'
* name: cipher suite name as defined in the RFCs,
e.g. 'TLS_RSA_WITH_RC4_40_MD5', can be generated by default from the
other parameters
* encoding: encoding algorithm, defaults to cipher+mode
Additional members:
* kx_auth: kx+auth algorithm, as 'KeyExchangeAlgorithm' in RFCs
"""
def __init__(self, code, kx, auth, cipher, mode, mac, name=None, encoding=None):
self.code = code
# We strip trailing whitespace here because we want to format the
# global table nicely while making pylint happy.
self._kx = kx.rstrip()
self._auth = auth.rstrip()
self.cipher = cipher.rstrip()
self.mode = mode.rstrip()
self.mac = mac.rstrip()
self._name = name
self._encoding = encoding
@property
def kx(self):
if self._kx == '': # for PSK
return self._auth
else:
return self._kx
@property
def auth(self):
if self._auth == '': # for RSA
return self._kx
else:
return self._auth
@property
def kx_auth(self):
if self._auth == '': # for RSA
return self._kx
elif self._kx == '': # for PSK
return self._auth
else:
return self._kx + '_' + self._auth
@property
def encoding(self):
if self._encoding is None:
if self.mode == '':
return self.cipher
else:
return self.cipher + '_' + self.mode
else:
return self._encoding
@property
def name(self):
if self._name is None:
if self.mac == '': # for CCM and CCM_8 modes
return 'TLS_' + self.kx_auth + '_WITH_' + self.encoding
else:
return 'TLS_' + self.kx_auth + '_WITH_' + self.encoding + '_' + self.mac
else:
return self._name
def __repr__(self):
return 'CipherSuite(0x%04x, %s)' % (self.code, self.name)
MAC_SIZES = {
'MD5': 16,
'SHA': 20,
'SHA256': 32,
'SHA384': 48,
}
BLOCK_SIZES = {
'3DES_EDE': 8,
'AES_128': 16,
'AES_256': 16,
'ARIA': 16,
'CAMELLIA_128': 16,
'CAMELLIA_256': 16,
'CHACHA20': 64,
'DES': 8,
'DES40': 8,
'IDEA': 8,
'RC2_40': 8,
'RC4_40': None,
'RC4_128': None,
'SEED': 16,
}
@property
def mac_size(self):
"""In bytes. Default to 0."""
return self.MAC_SIZES.get(self.mac, 0)
@property
def block_size(self):
"""In bytes. Default to 1."""
return self.BLOCK_SIZES.get(self.cipher, 1)
@property
def pfs(self):
return self.kx in ('DHE', 'ECDHE')
@property
def aead(self):
return self.mode in ('CCM', 'CCM_8', 'GCM')
@property
def anonymous(self):
return self.auth.startswith('anon')
def get_unknown_ciphersuite(code):
return CipherSuite(code, '', '', '', '', '', name='Unknown')
# master list of CipherSuite Objects
# Full list from IANA:
# https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml
CIPHERSUITES = [
# not a real cipher suite, can be ignored, see RFC5746
CipherSuite(0x00ff, 'NULL', ' ', 'NULL ', ' ', 'NULL', 'TLS_EMPTY_RENEGOTIATION_INFO'),
# RFC7507
CipherSuite(0x5600, '', ' ', '', '', '', 'TLS_FALLBACK'),
CipherSuite(0xffff, '', ' ', '', '', '', 'UNKNOWN_CIPHER'),
# RFC2246 : TLS 1.0
CipherSuite(0x0000, 'NULL', ' ', 'NULL ', ' ', 'NULL'),
CipherSuite(0x0001, 'RSA', ' ', 'NULL ', ' ', 'MD5'),
CipherSuite(0x0002, 'RSA', ' ', 'NULL ', ' ', 'SHA'),
CipherSuite(0x0003, 'RSA_EXPORT', ' ', 'RC4_40 ', ' ', 'MD5'),
CipherSuite(0x0004, 'RSA', ' ', 'RC4_128 ', ' ', 'MD5'),
CipherSuite(0x0005, 'RSA', ' ', 'RC4_128 ', ' ', 'SHA'),
CipherSuite(0x0006, 'RSA_EXPORT', ' ', 'RC2_40 ', 'CBC ', 'MD5', encoding='RC2_CBC_40'),
CipherSuite(0x0007, 'RSA', ' ', 'IDEA ', 'CBC ', 'SHA'),
CipherSuite(0x0008, 'RSA_EXPORT', ' ', 'DES40 ', 'CBC ', 'SHA'),
CipherSuite(0x0009, 'RSA', ' ', 'DES ', 'CBC ', 'SHA'),
CipherSuite(0x000a, 'RSA', ' ', '3DES_EDE', 'CBC ', 'SHA'),
CipherSuite(0x000b, 'DH', 'DSS_EXPORT', 'DES40 ', 'CBC ', 'SHA'),
CipherSuite(0x000c, 'DH', 'DSS ', 'DES ', 'CBC ', 'SHA'),
CipherSuite(0x000d, 'DH', 'DSS ', '3DES_EDE', 'CBC ', 'SHA'),
CipherSuite(0x000e, 'DH', 'RSA_EXPORT', 'DES40 ', 'CBC ', 'SHA'),
CipherSuite(0x000f, 'DH', 'RSA ', 'DES ', 'CBC ', 'SHA'),
CipherSuite(0x0010, 'DH', 'RSA ', '3DES_EDE', 'CBC ', 'SHA'),
CipherSuite(0x0011, 'DHE', 'DSS_EXPORT', 'DES40 ', 'CBC ', 'SHA'),
CipherSuite(0x0012, 'DHE', 'DSS ', 'DES ', 'CBC ', 'SHA'),
CipherSuite(0x0013, 'DHE', 'DSS ', '3DES_EDE', 'CBC ', 'SHA'),
CipherSuite(0x0014, 'DHE', 'RSA_EXPORT', 'DES40 ', 'CBC ', 'SHA'),
CipherSuite(0x0015, 'DHE', 'RSA ', 'DES ', 'CBC ', 'SHA'),
CipherSuite(0x0016, 'DHE', 'RSA ', '3DES_EDE', 'CBC ', 'SHA'),
CipherSuite(0x0017, 'DH', 'anon_EXPORT', 'RC4_40 ', ' ', 'MD5'),
CipherSuite(0x0018, 'DH', 'anon ', 'RC4_128 ', ' ', 'MD5'),
CipherSuite(0x0019, 'DH', 'anon_EXPORT', 'DES40 ', 'CBC ', 'SHA'),
CipherSuite(0x001a, 'DH', 'anon ', 'DES ', 'CBC ', 'SHA'),
CipherSuite(0x001b, 'DH', 'anon ', '3DES_EDE', 'CBC ', 'SHA'),
# Reserved: 0x1c-0x1d
# RFC4346 : TLS 1.1
# RFC2712
CipherSuite(0x001e, 'KRB5', ' ', 'DES ', 'CBC ', 'SHA'),
CipherSuite(0x001f, 'KRB5', ' ', '3DES_EDE', 'CBC ', 'SHA'),
CipherSuite(0x0020, 'KRB5', ' ', 'RC4_128 ', ' ', 'SHA'),
CipherSuite(0x0021, 'KRB5', ' ', 'IDEA ', 'CBC ', 'SHA'),
CipherSuite(0x0022, 'KRB5', ' ', 'DES ', 'CBC ', 'MD5'),
CipherSuite(0x0023, 'KRB5', ' ', '3DES_EDE', 'CBC ', 'MD5'),
CipherSuite(0x0024, 'KRB5', ' ', 'RC4_128 ', ' ', 'MD5'),
CipherSuite(0x0025, 'KRB5', ' ', 'IDEA ', 'CBC ', 'MD5'),
CipherSuite(0x0026, 'KRB5_EXPORT', ' ', 'DES40 ', 'CBC ', 'SHA', encoding='DES_CBC_40'),
CipherSuite(0x0027, 'KRB5_EXPORT', ' ', 'RC2_40 ', 'CBC ', 'SHA', encoding='RC2_CBC_40'),
CipherSuite(0x0028, 'KRB5_EXPORT', ' ', 'RC4_40 ', ' ', 'SHA'),
CipherSuite(0x0029, 'KRB5_EXPORT', ' ', 'DES40 ', 'CBC ', 'MD5', encoding='DES_CBC_40'),
CipherSuite(0x002a, 'KRB5_EXPORT', ' ', 'RC2_40 ', 'CBC ', 'MD5', encoding='RC2_CBC_40'),
CipherSuite(0x002b, 'KRB5_EXPORT', ' ', 'RC4_40 ', ' ', 'MD5'),
# RFC4785
CipherSuite(0x002c, ' ', 'PSK ', 'NULL ', ' ', 'SHA'),
CipherSuite(0x002d, 'DHE ', 'PSK ', 'NULL ', ' ', 'SHA'),
CipherSuite(0x002e, 'RSA ', 'PSK ', 'NULL ', ' ', 'SHA'),
# RFC3268
CipherSuite(0x002f, 'RSA ', ' ', 'AES_128 ', 'CBC ', 'SHA'),
CipherSuite(0x0030, 'DH ', 'DSS ', 'AES_128 ', 'CBC ', 'SHA'),
CipherSuite(0x0031, 'DH ', 'RSA ', 'AES_128 ', 'CBC ', 'SHA'),
CipherSuite(0x0032, 'DHE ', 'DSS ', 'AES_128 ', 'CBC ', 'SHA'),
CipherSuite(0x0033, 'DHE ', 'RSA ', 'AES_128 ', 'CBC ', 'SHA'),
CipherSuite(0x0034, 'DH ', 'anon ', 'AES_128 ', 'CBC ', 'SHA'),
CipherSuite(0x0035, 'RSA ', ' ', 'AES_256 ', 'CBC ', 'SHA'),
CipherSuite(0x0036, 'DH ', 'DSS ', 'AES_256 ', 'CBC ', 'SHA'),
CipherSuite(0x0037, 'DH ', 'RSA ', 'AES_256 ', 'CBC ', 'SHA'),
CipherSuite(0x0038, 'DHE ', 'DSS ', 'AES_256 ', 'CBC ', 'SHA'),
CipherSuite(0x0039, 'DHE ', 'RSA ', 'AES_256 ', 'CBC ', 'SHA'),
CipherSuite(0x003a, 'DH ', 'anon ', 'AES_256 ', 'CBC ', 'SHA'),
# RFC5246 : TLS 1.2
CipherSuite(0x003b, 'RSA ', ' ', 'NULL ', ' ', 'SHA256'),
CipherSuite(0x003c, 'RSA ', ' ', 'AES_128 ', 'CBC ', 'SHA256'),
CipherSuite(0x003d, 'RSA ', ' ', 'AES_256 ', 'CBC ', 'SHA256'),
CipherSuite(0x003e, 'DH ', 'DSS ', 'AES_128 ', 'CBC ', 'SHA256'),
CipherSuite(0x003f, 'DH ', 'RSA ', 'AES_128 ', 'CBC ', 'SHA256'),
CipherSuite(0x0040, 'DHE ', 'DSS ', 'AES_128 ', 'CBC ', 'SHA256'),
# RFC5932
CipherSuite(0x0041, 'RSA ', ' ', 'CAMELLIA_128', 'CBC', 'SHA'),
CipherSuite(0x0042, 'DH ', 'DSS ', 'CAMELLIA_128', 'CBC', 'SHA'),
CipherSuite(0x0043, 'DH ', 'RSA ', 'CAMELLIA_128', 'CBC', 'SHA'),
CipherSuite(0x0044, 'DHE ', 'DSS ', 'CAMELLIA_128', 'CBC', 'SHA'),
CipherSuite(0x0045, 'DHE ', 'RSA ', 'CAMELLIA_128', 'CBC', 'SHA'),
CipherSuite(0x0046, 'DH ', 'anon ', 'CAMELLIA_128', 'CBC', 'SHA'),
# Reserved: 0x47-5c
# Unassigned: 0x5d-5f
# Reserved: 0x60-66
# RFC5246 : TLS 1.2
CipherSuite(0x0067, 'DHE ', 'RSA ', 'AES_128 ', 'CBC ', 'SHA256'),
CipherSuite(0x0068, 'DH ', 'DSS ', 'AES_256 ', 'CBC ', 'SHA256'),
CipherSuite(0x0069, 'DH ', 'RSA ', 'AES_256 ', 'CBC ', 'SHA256'),
CipherSuite(0x006a, 'DHE ', 'DSS ', 'AES_256 ', 'CBC ', 'SHA256'),
CipherSuite(0x006b, 'DHE ', 'RSA ', 'AES_256 ', 'CBC ', 'SHA256'),
CipherSuite(0x006c, 'DH ', 'anon ', 'AES_128 ', 'CBC ', 'SHA256'),
CipherSuite(0x006d, 'DH ', 'anon ', 'AES_256 ', 'CBC ', 'SHA256'),
# Unassigned: 0x6e-83
# RFC5932
CipherSuite(0x0084, 'RSA ', ' ', 'CAMELLIA_256', 'CBC', 'SHA'),
CipherSuite(0x0085, 'DH ', 'DSS ', 'CAMELLIA_256', 'CBC', 'SHA'),
CipherSuite(0x0086, 'DH ', 'RSA ', 'CAMELLIA_256', 'CBC', 'SHA'),
CipherSuite(0x0087, 'DHE ', 'DSS ', 'CAMELLIA_256', 'CBC', 'SHA'),
CipherSuite(0x0088, 'DHE ', 'RSA ', 'CAMELLIA_256', 'CBC', 'SHA'),
CipherSuite(0x0089, 'DH ', 'anon ', 'CAMELLIA_256', 'CBC', 'SHA'),
# RFC4279
CipherSuite(0x008a, ' ', 'PSK ', 'RC4_128 ', ' ', 'SHA'),
CipherSuite(0x008b, ' ', 'PSK ', '3DES_EDE', 'CBC ', 'SHA'),
CipherSuite(0x008c, ' ', 'PSK ', 'AES_128 ', 'CBC ', 'SHA'),
CipherSuite(0x008d, ' ', 'PSK ', 'AES_256 ', 'CBC ', 'SHA'),
CipherSuite(0x008e, 'DHE ', 'PSK ', 'RC4_128 ', ' ', 'SHA'),
CipherSuite(0x008f, 'DHE ', 'PSK ', '3DES_EDE', 'CBC ', 'SHA'),
CipherSuite(0x0090, 'DHE ', 'PSK ', 'AES_128 ', 'CBC ', 'SHA'),
CipherSuite(0x0091, 'DHE ', 'PSK ', 'AES_256 ', 'CBC ', 'SHA'),
CipherSuite(0x0092, 'RSA ', 'PSK ', 'RC4_128 ', ' ', 'SHA'),
CipherSuite(0x0093, 'RSA ', 'PSK ', '3DES_EDE', 'CBC ', 'SHA'),
CipherSuite(0x0094, 'RSA ', 'PSK ', 'AES_128 ', 'CBC ', 'SHA'),
CipherSuite(0x0095, 'RSA ', 'PSK ', 'AES_256 ', 'CBC ', 'SHA'),
# RFC4162
CipherSuite(0x0096, 'RSA ', ' ', 'SEED ', 'CBC ', 'SHA'),
CipherSuite(0x0097, 'DH ', 'DSS ', 'SEED ', 'CBC ', 'SHA'),
CipherSuite(0x0098, 'DH ', 'RSA ', 'SEED ', 'CBC ', 'SHA'),
CipherSuite(0x0099, 'DHE ', 'DSS ', 'SEED ', 'CBC ', 'SHA'),
CipherSuite(0x009a, 'DHE ', 'RSA ', 'SEED ', 'CBC ', 'SHA'),
CipherSuite(0x009b, 'DH ', 'anon ', 'SEED ', 'CBC ', 'SHA'),
# RFC5288
CipherSuite(0x009c, 'RSA ', ' ', 'AES_128 ', 'GCM ', 'SHA256'),
CipherSuite(0x009d, 'RSA ', ' ', 'AES_256 ', 'GCM ', 'SHA384'),
CipherSuite(0x009e, 'DHE ', 'RSA ', 'AES_128 ', 'GCM ', 'SHA256'),
CipherSuite(0x009f, 'DHE ', 'RSA ', 'AES_256 ', 'GCM ', 'SHA384'),
CipherSuite(0x00a0, 'DH ', 'RSA ', 'AES_128 ', 'GCM ', 'SHA256'),
CipherSuite(0x00a1, 'DH ', 'RSA ', 'AES_256 ', 'GCM ', 'SHA384'),
CipherSuite(0x00a2, 'DHE ', 'DSS ', 'AES_128 ', 'GCM ', 'SHA256'),
CipherSuite(0x00a3, 'DHE ', 'DSS ', 'AES_256 ', 'GCM ', 'SHA384'),
CipherSuite(0x00a4, 'DH ', 'DSS ', 'AES_128 ', 'GCM ', 'SHA256'),
CipherSuite(0x00a5, 'DH ', 'DSS ', 'AES_256 ', 'GCM ', 'SHA384'),
CipherSuite(0x00a6, 'DH ', 'anon ', 'AES_128 ', 'GCM ', 'SHA256'),
CipherSuite(0x00a7, 'DH ', 'anon ', 'AES_256 ', 'GCM ', 'SHA384'),
# RFC5487
CipherSuite(0x00a8, ' ', 'PSK ', 'AES_128 ', 'GCM ', 'SHA256'),
CipherSuite(0x00a9, ' ', 'PSK ', 'AES_256 ', 'GCM ', 'SHA384'),
CipherSuite(0x00aa, 'DHE ', 'PSK ', 'AES_128 ', 'GCM ', 'SHA256'),
CipherSuite(0x00ab, 'DHE ', 'PSK ', 'AES_256 ', 'GCM ', 'SHA384'),
CipherSuite(0x00ac, 'RSA ', 'PSK ', 'AES_128 ', 'GCM ', 'SHA256'),
CipherSuite(0x00ad, 'RSA ', 'PSK ', 'AES_256 ', 'GCM ', 'SHA384'),
CipherSuite(0x00ae, ' ', 'PSK ', 'AES_128 ', 'CBC ', 'SHA256'),
CipherSuite(0x00af, ' ', 'PSK ', 'AES_256 ', 'CBC ', 'SHA384'),
CipherSuite(0x00b0, ' ', 'PSK ', 'NULL ', ' ', 'SHA256'),
CipherSuite(0x00b1, ' ', 'PSK ', 'NULL ', ' ', 'SHA384'),
CipherSuite(0x00b2, 'DHE ', 'PSK ', 'AES_128 ', 'CBC ', 'SHA256'),
CipherSuite(0x00b3, 'DHE ', 'PSK ', 'AES_256 ', 'CBC ', 'SHA384'),
CipherSuite(0x00b4, 'DHE ', 'PSK ', 'NULL ', ' ', 'SHA256'),
CipherSuite(0x00b5, 'DHE ', 'PSK ', 'NULL ', ' ', 'SHA384'),
CipherSuite(0x00b6, 'RSA ', 'PSK ', 'AES_128 ', 'CBC ', 'SHA256'),
CipherSuite(0x00b7, 'RSA ', 'PSK ', 'AES_256 ', 'CBC ', 'SHA384'),
CipherSuite(0x00b8, 'RSA ', 'PSK ', 'NULL ', ' ', 'SHA256'),
CipherSuite(0x00b9, 'RSA ', 'PSK ', 'NULL ', ' ', 'SHA384'),
# RFC5932
CipherSuite(0x00ba, 'RSA ', ' ', 'CAMELLIA_128', 'CBC', 'SHA256'),
CipherSuite(0x00bb, 'DH ', 'DSS ', 'CAMELLIA_128', 'CBC', 'SHA256'),
CipherSuite(0x00bc, 'DH ', 'RSA ', 'CAMELLIA_128', 'CBC', 'SHA256'),
CipherSuite(0x00bd, 'DHE ', 'DSS ', 'CAMELLIA_128', 'CBC', 'SHA256'),
CipherSuite(0x00be, 'DHE ', 'RSA ', 'CAMELLIA_128', 'CBC', 'SHA256'),
CipherSuite(0x00bf, 'DH ', 'anon ', 'CAMELLIA_128', 'CBC', 'SHA256'),
CipherSuite(0x00c0, 'RSA ', ' ', 'CAMELLIA_256', 'CBC', 'SHA256'),
CipherSuite(0x00c1, 'DH ', 'DSS ', 'CAMELLIA_256', 'CBC', 'SHA256'),
CipherSuite(0x00c2, 'DH ', 'RSA ', 'CAMELLIA_256', 'CBC', 'SHA256'),
CipherSuite(0x00c3, 'DHE ', 'DSS ', 'CAMELLIA_256', 'CBC', 'SHA256'),
CipherSuite(0x00c4, 'DHE ', 'RSA ', 'CAMELLIA_256', 'CBC', 'SHA256'),
CipherSuite(0x00c5, 'DH ', 'anon ', 'CAMELLIA_256', 'CBC', 'SHA256'),
# RFC8446 TLS 1.3
CipherSuite(0x1301, ' ', ' ', 'AES_128 ', 'GCM ', 'SHA256', name='TLS_AES_128_GCM_SHA256'),
CipherSuite(0x1302, ' ', ' ', 'AES_256 ', 'GCM ', 'SHA384', name='TLS_AES_256_GCM_SHA384'),
CipherSuite(0x1303, ' ', ' ', 'CHACHA20', 'POLY1305', 'SHA256', name='TLS_CHACHA20_POLY1305_SHA256'),
CipherSuite(0x1304, ' ', ' ', 'AES_128 ', 'CCM ', 'SHA256', name='TLS_AES_128_CCM_SHA256'),
CipherSuite(0x1305, ' ', ' ', 'AES_128 ', 'CCM_8 ', 'SHA256', name='TLS_AES_128_CCM_8_SHA256'),
# RFC4492
CipherSuite(0xc001, 'ECDH ', 'ECDSA ', 'NULL ', ' ', 'SHA'),
CipherSuite(0xc002, 'ECDH ', 'ECDSA ', 'RC4_128 ', ' ', 'SHA'),
CipherSuite(0xc003, 'ECDH ', 'ECDSA ', '3DES_EDE', 'CBC ', 'SHA'),
CipherSuite(0xc004, 'ECDH ', 'ECDSA ', 'AES_128 ', 'CBC ', 'SHA'),
CipherSuite(0xc005, 'ECDH ', 'ECDSA ', 'AES_256 ', 'CBC ', 'SHA'),
CipherSuite(0xc006, 'ECDHE', 'ECDSA ', 'NULL ', ' ', 'SHA'),
CipherSuite(0xc007, 'ECDHE', 'ECDSA ', 'RC4_128 ', ' ', 'SHA'),
CipherSuite(0xc008, 'ECDHE', 'ECDSA ', '3DES_EDE', 'CBC ', 'SHA'),
CipherSuite(0xc009, 'ECDHE', 'ECDSA ', 'AES_128 ', 'CBC ', 'SHA'),
CipherSuite(0xc00a, 'ECDHE', 'ECDSA ', 'AES_256 ', 'CBC ', 'SHA'),
CipherSuite(0xc00b, 'ECDH ', 'RSA ', 'NULL ', ' ', 'SHA'),
CipherSuite(0xc00c, 'ECDH ', 'RSA ', 'RC4_128 ', ' ', 'SHA'),
CipherSuite(0xc00d, 'ECDH ', 'RSA ', '3DES_EDE', 'CBC ', 'SHA'),
CipherSuite(0xc00e, 'ECDH ', 'RSA ', 'AES_128 ', 'CBC ', 'SHA'),
CipherSuite(0xc00f, 'ECDH ', 'RSA ', 'AES_256 ', 'CBC ', 'SHA'),
CipherSuite(0xc010, 'ECDHE', 'RSA ', 'NULL ', ' ', 'SHA'),
CipherSuite(0xc011, 'ECDHE', 'RSA ', 'RC4_128 ', ' ', 'SHA'),
CipherSuite(0xc012, 'ECDHE', 'RSA ', '3DES_EDE', 'CBC ', 'SHA'),
CipherSuite(0xc013, 'ECDHE', 'RSA ', 'AES_128 ', 'CBC ', 'SHA'),
CipherSuite(0xc014, 'ECDHE', 'RSA ', 'AES_256 ', 'CBC ', 'SHA'),
CipherSuite(0xc015, 'ECDH ', 'anon ', 'NULL ', ' ', 'SHA'),
CipherSuite(0xc016, 'ECDH ', 'anon ', 'RC4_128 ', ' ', 'SHA'),
CipherSuite(0xc017, 'ECDH ', 'anon ', '3DES_EDE', 'CBC ', 'SHA'),
CipherSuite(0xc018, 'ECDH ', 'anon ', 'AES_128 ', 'CBC ', 'SHA'),
CipherSuite(0xc019, 'ECDH ', 'anon ', 'AES_256 ', 'CBC ', 'SHA'),
# RFC5054
CipherSuite(0xc01a, 'SRP_SHA', ' ', '3DES_EDE', 'CBC ', 'SHA'),
CipherSuite(0xc01b, 'SRP_SHA', 'RSA ', '3DES_EDE', 'CBC ', 'SHA'),
CipherSuite(0xc01c, 'SRP_SHA', 'DSS ', '3DES_EDE', 'CBC ', 'SHA'),
CipherSuite(0xc01d, 'SRP_SHA', ' ', 'AES_128 ', 'CBC ', 'SHA'),
CipherSuite(0xc01e, 'SRP_SHA', 'RSA ', 'AES_128 ', 'CBC ', 'SHA'),
CipherSuite(0xc01f, 'SRP_SHA', 'DSS ', 'AES_128 ', 'CBC ', 'SHA'),
CipherSuite(0xc020, 'SRP_SHA', ' ', 'AES_256 ', 'CBC ', 'SHA'),
CipherSuite(0xc021, 'SRP_SHA', 'RSA ', 'AES_256 ', 'CBC ', 'SHA'),
CipherSuite(0xc022, 'SRP_SHA', 'DSS ', 'AES_256 ', 'CBC ', 'SHA'),
# RFC5289
CipherSuite(0xc023, 'ECDHE', 'ECDSA ', 'AES_128 ', 'CBC ', 'SHA256'),
CipherSuite(0xc024, 'ECDHE', 'ECDSA ', 'AES_256 ', 'CBC ', 'SHA384'),
CipherSuite(0xc025, 'ECDH ', 'ECDSA ', 'AES_128 ', 'CBC ', 'SHA256'),
CipherSuite(0xc026, 'ECDH ', 'ECDSA ', 'AES_256 ', 'CBC ', 'SHA384'),
CipherSuite(0xc027, 'ECDHE', 'RSA ', 'AES_128 ', 'CBC ', 'SHA256'),
CipherSuite(0xc028, 'ECDHE', 'RSA ', 'AES_256 ', 'CBC ', 'SHA384'),
CipherSuite(0xc029, 'ECDH ', 'RSA ', 'AES_128 ', 'CBC ', 'SHA256'),
CipherSuite(0xc02a, 'ECDH ', 'RSA ', 'AES_256 ', 'CBC ', 'SHA384'),
CipherSuite(0xc02b, 'ECDHE', 'ECDSA ', 'AES_128 ', 'GCM ', 'SHA256'),
CipherSuite(0xc02c, 'ECDHE', 'ECDSA ', 'AES_256 ', 'GCM ', 'SHA384'),
CipherSuite(0xc02d, 'ECDH ', 'ECDSA ', 'AES_128 ', 'GCM ', 'SHA256'),
CipherSuite(0xc02e, 'ECDH ', 'ECDSA ', 'AES_256 ', 'GCM ', 'SHA384'),
CipherSuite(0xc02f, 'ECDHE', 'RSA ', 'AES_128 ', 'GCM ', 'SHA256'),
CipherSuite(0xc030, 'ECDHE', 'RSA ', 'AES_256 ', 'GCM ', 'SHA384'),
CipherSuite(0xc031, 'ECDH ', 'RSA ', 'AES_128 ', 'GCM ', 'SHA256'),
CipherSuite(0xc032, 'ECDH ', 'RSA ', 'AES_256 ', 'GCM ', 'SHA384'),
# RFC5489
CipherSuite(0xc033, 'ECDHE', 'PSK ', 'RC4_128 ', ' ', 'SHA'),
CipherSuite(0xc034, 'ECDHE', 'PSK ', '3DES_EDE', 'CBC ', 'SHA'),
CipherSuite(0xc035, 'ECDHE', 'PSK ', 'AES_128 ', 'CBC ', 'SHA'),
CipherSuite(0xc036, 'ECDHE', 'PSK ', 'AES_256 ', 'CBC ', 'SHA'),
CipherSuite(0xc037, 'ECDHE', 'PSK ', 'AES_128 ', 'CBC ', 'SHA256'),
CipherSuite(0xc038, 'ECDHE', 'PSK ', 'AES_256 ', 'CBC ', 'SHA384'),
CipherSuite(0xc039, 'ECDHE', 'PSK ', 'NULL ', ' ', 'SHA'),
CipherSuite(0xc03a, 'ECDHE', 'PSK ', 'NULL ', ' ', 'SHA256'),
CipherSuite(0xc03b, 'ECDHE', 'PSK ', 'NULL ', ' ', 'SHA384'),
# RFC6209
CipherSuite(0xc03c, 'RSA ', ' ', 'ARIA_128', 'CBC ', 'SHA256'),
CipherSuite(0xc03d, 'RSA ', ' ', 'ARIA_256', 'CBC ', 'SHA384'),
CipherSuite(0xc03e, 'DH ', 'DSS ', 'ARIA_128', 'CBC ', 'SHA256'),
CipherSuite(0xc03f, 'DH ', 'DSS ', 'ARIA_256', 'CBC ', 'SHA384'),
CipherSuite(0xc040, 'DH ', 'RSA ', 'ARIA_128', 'CBC ', 'SHA256'),
CipherSuite(0xc041, 'DH ', 'RSA ', 'ARIA_256', 'CBC ', 'SHA384'),
CipherSuite(0xc042, 'DHE ', 'DSS ', 'ARIA_128', 'CBC ', 'SHA256'),
CipherSuite(0xc043, 'DHE ', 'DSS ', 'ARIA_256', 'CBC ', 'SHA384'),
CipherSuite(0xc044, 'DHE ', 'RSA ', 'ARIA_128', 'CBC ', 'SHA256'),
CipherSuite(0xc045, 'DHE ', 'RSA ', 'ARIA_256', 'CBC ', 'SHA384'),
CipherSuite(0xc046, 'DH ', 'anon ', 'ARIA_128', 'CBC ', 'SHA256'),
CipherSuite(0xc047, 'DH ', 'anon ', 'ARIA_256', 'CBC ', 'SHA384'),
CipherSuite(0xc048, 'ECDHE', 'ECDSA ', 'ARIA_128', 'CBC ', 'SHA256'),
CipherSuite(0xc049, 'ECDHE', 'ECDSA ', 'ARIA_256', 'CBC ', 'SHA384'),
CipherSuite(0xc04a, 'ECDH ', 'ECDSA ', 'ARIA_128', 'CBC ', 'SHA256'),
CipherSuite(0xc04b, 'ECDH ', 'ECDSA ', 'ARIA_256', 'CBC ', 'SHA384'),
CipherSuite(0xc04c, 'ECDHE', 'RSA ', 'ARIA_128', 'CBC ', 'SHA256'),
CipherSuite(0xc04d, 'ECDHE', 'RSA ', 'ARIA_256', 'CBC ', 'SHA384'),
CipherSuite(0xc04e, 'ECDH ', 'RSA ', 'ARIA_128', 'CBC ', 'SHA256'),
CipherSuite(0xc04f, 'ECDH ', 'RSA ', 'ARIA_256', 'CBC ', 'SHA384'),
CipherSuite(0xc050, 'RSA ', ' ', 'ARIA_128', 'GCM ', 'SHA256'),
CipherSuite(0xc051, 'RSA ', ' ', 'ARIA_256', 'GCM ', 'SHA384'),
CipherSuite(0xc052, 'DHE ', 'RSA ', 'ARIA_128', 'GCM ', 'SHA256'),
CipherSuite(0xc053, 'DHE ', 'RSA ', 'ARIA_256', 'GCM ', 'SHA384'),
CipherSuite(0xc054, 'DH ', 'RSA ', 'ARIA_128', 'GCM ', 'SHA256'),
CipherSuite(0xc055, 'DH ', 'RSA ', 'ARIA_256', 'GCM ', 'SHA384'),
CipherSuite(0xc056, 'DHE ', 'DSS ', 'ARIA_128', 'GCM ', 'SHA256'),
CipherSuite(0xc057, 'DHE ', 'DSS ', 'ARIA_256', 'GCM ', 'SHA384'),
CipherSuite(0xc058, 'DH ', 'DSS ', 'ARIA_128', 'GCM ', 'SHA256'),
CipherSuite(0xc059, 'DH ', 'DSS ', 'ARIA_256', 'GCM ', 'SHA384'),
CipherSuite(0xc05a, 'DH ', 'anon ', 'ARIA_128', 'GCM ', 'SHA256'),
CipherSuite(0xc05b, 'DH ', 'anon ', 'ARIA_256', 'GCM ', 'SHA384'),
CipherSuite(0xc05c, 'ECDHE', 'ECDSA ', 'ARIA_128', 'GCM ', 'SHA256'),
CipherSuite(0xc05d, 'ECDHE', 'ECDSA ', 'ARIA_256', 'GCM ', 'SHA384'),
CipherSuite(0xc05e, 'ECDH ', 'ECDSA ', 'ARIA_128', 'GCM ', 'SHA256'),
CipherSuite(0xc05f, 'ECDH ', 'ECDSA ', 'ARIA_256', 'GCM ', 'SHA384'),
CipherSuite(0xc060, 'ECDHE', 'RSA ', 'ARIA_128', 'GCM ', 'SHA256'),
CipherSuite(0xc061, 'ECDHE', 'RSA ', 'ARIA_256', 'GCM ', 'SHA384'),
CipherSuite(0xc062, 'ECDH ', 'RSA ', 'ARIA_128', 'GCM ', 'SHA256'),
CipherSuite(0xc063, 'ECDH ', 'RSA ', 'ARIA_256', 'GCM ', 'SHA384'),
CipherSuite(0xc064, ' ', 'PSK ', 'ARIA_128', 'CBC ', 'SHA256'),
CipherSuite(0xc065, ' ', 'PSK ', 'ARIA_256', 'CBC ', 'SHA384'),
CipherSuite(0xc066, 'DHE ', 'PSK ', 'ARIA_128', 'CBC ', 'SHA256'),
CipherSuite(0xc067, 'DHE ', 'PSK ', 'ARIA_256', 'CBC ', 'SHA384'),
CipherSuite(0xc068, 'RSA ', 'PSK ', 'ARIA_128', 'CBC ', 'SHA256'),
CipherSuite(0xc069, 'RSA ', 'PSK ', 'ARIA_256', 'CBC ', 'SHA384'),
CipherSuite(0xc06a, ' ', 'PSK ', 'ARIA_128', 'GCM ', 'SHA256'),
CipherSuite(0xc06b, ' ', 'PSK ', 'ARIA_256', 'GCM ', 'SHA384'),
CipherSuite(0xc06c, 'DHE ', 'PSK ', 'ARIA_128', 'GCM ', 'SHA256'),
CipherSuite(0xc06d, 'DHE ', 'PSK ', 'ARIA_256', 'GCM ', 'SHA384'),
CipherSuite(0xc06e, 'RSA ', 'PSK ', 'ARIA_128', 'GCM ', 'SHA256'),
CipherSuite(0xc06f, 'RSA ', 'PSK ', 'ARIA_256', 'GCM ', 'SHA384'),
CipherSuite(0xc070, 'ECDHE', 'PSK ', 'ARIA_128', 'GCM ', 'SHA256'),
CipherSuite(0xc071, 'ECDHE', 'PSK ', 'ARIA_256', 'GCM ', 'SHA384'),
# RFC6367
CipherSuite(0xc072, 'ECDHE', 'ECDSA ', 'CAMELLIA_128', 'CBC', 'SHA256'),
CipherSuite(0xc073, 'ECDHE', 'ECDSA ', 'CAMELLIA_256', 'CBC', 'SHA384'),
CipherSuite(0xc074, 'ECDH ', 'ECDSA ', 'CAMELLIA_128', 'CBC', 'SHA256'),
CipherSuite(0xc075, 'ECDH ', 'ECDSA ', 'CAMELLIA_256', 'CBC', 'SHA384'),
CipherSuite(0xc076, 'ECDHE', 'RSA ', 'CAMELLIA_128', 'CBC', 'SHA256'),
CipherSuite(0xc077, 'ECDHE', 'RSA ', 'CAMELLIA_256', 'CBC', 'SHA384'),
CipherSuite(0xc078, 'ECDH ', 'RSA ', 'CAMELLIA_128', 'CBC', 'SHA256'),
CipherSuite(0xc079, 'ECDH ', 'RSA ', 'CAMELLIA_256', 'CBC', 'SHA384'),
CipherSuite(0xc07a, 'RSA ', ' ', 'CAMELLIA_128', 'GCM', 'SHA256'),
CipherSuite(0xc07b, 'RSA ', ' ', 'CAMELLIA_256', 'GCM', 'SHA384'),
CipherSuite(0xc07c, 'DHE ', 'RSA ', 'CAMELLIA_128', 'GCM', 'SHA256'),
CipherSuite(0xc07d, 'DHE ', 'RSA ', 'CAMELLIA_256', 'GCM', 'SHA384'),
CipherSuite(0xc07e, 'DH ', 'RSA ', 'CAMELLIA_128', 'GCM', 'SHA256'),
CipherSuite(0xc07f, 'DH ', 'RSA ', 'CAMELLIA_256', 'GCM', 'SHA384'),
CipherSuite(0xc080, 'DHE ', 'DSS ', 'CAMELLIA_128', 'GCM', 'SHA256'),
CipherSuite(0xc081, 'DHE ', 'DSS ', 'CAMELLIA_256', 'GCM', 'SHA384'),
CipherSuite(0xc082, 'DH ', 'DSS ', 'CAMELLIA_128', 'GCM', 'SHA256'),
CipherSuite(0xc083, 'DH ', 'DSS ', 'CAMELLIA_256', 'GCM', 'SHA384'),
CipherSuite(0xc084, 'DH ', 'anon ', 'CAMELLIA_128', 'GCM', 'SHA256'),
CipherSuite(0xc085, 'DH ', 'anon ', 'CAMELLIA_256', 'GCM', 'SHA384'),
CipherSuite(0xc086, 'ECDHE', 'ECDSA ', 'CAMELLIA_128', 'GCM', 'SHA256'),
CipherSuite(0xc087, 'ECDHE', 'ECDSA ', 'CAMELLIA_256', 'GCM', 'SHA384'),
CipherSuite(0xc088, 'ECDH ', 'ECDSA ', 'CAMELLIA_128', 'GCM', 'SHA256'),
CipherSuite(0xc089, 'ECDH ', 'ECDSA ', 'CAMELLIA_256', 'GCM', 'SHA384'),
CipherSuite(0xc08a, 'ECDHE', 'RSA ', 'CAMELLIA_128', 'GCM', 'SHA256'),
CipherSuite(0xc08b, 'ECDHE', 'RSA ', 'CAMELLIA_256', 'GCM', 'SHA384'),
CipherSuite(0xc08c, 'ECDH ', 'RSA ', 'CAMELLIA_128', 'GCM', 'SHA256'),
CipherSuite(0xc08d, 'ECDH ', 'RSA ', 'CAMELLIA_256', 'GCM', 'SHA384'),
CipherSuite(0xc08e, ' ', 'PSK ', 'CAMELLIA_128', 'GCM', 'SHA256'),
CipherSuite(0xc08f, ' ', 'PSK ', 'CAMELLIA_256', 'GCM', 'SHA384'),
CipherSuite(0xc090, 'DHE ', 'PSK ', 'CAMELLIA_128', 'GCM', 'SHA256'),
CipherSuite(0xc091, 'DHE ', 'PSK ', 'CAMELLIA_256', 'GCM', 'SHA384'),
CipherSuite(0xc092, 'RSA ', 'PSK ', 'CAMELLIA_128', 'GCM', 'SHA256'),
CipherSuite(0xc093, 'RSA ', 'PSK ', 'CAMELLIA_256', 'GCM', 'SHA384'),
CipherSuite(0xc094, ' ', 'PSK ', 'CAMELLIA_128', 'CBC', 'SHA256'),
CipherSuite(0xc095, ' ', 'PSK ', 'CAMELLIA_256', 'CBC', 'SHA384'),
CipherSuite(0xc096, 'DHE ', 'PSK ', 'CAMELLIA_128', 'CBC', 'SHA256'),
CipherSuite(0xc097, 'DHE ', 'PSK ', 'CAMELLIA_256', 'CBC', 'SHA384'),
CipherSuite(0xc098, 'RSA ', 'PSK ', 'CAMELLIA_128', 'CBC', 'SHA256'),
CipherSuite(0xc099, 'RSA ', 'PSK ', 'CAMELLIA_256', 'CBC', 'SHA384'),
CipherSuite(0xc09a, 'ECDHE', 'PSK ', 'CAMELLIA_128', 'CBC', 'SHA256'),
CipherSuite(0xc09b, 'ECDHE', 'PSK ', 'CAMELLIA_256', 'CBC', 'SHA384'),
# RFC6655
CipherSuite(0xc09c, 'RSA ', ' ', 'AES_128 ', 'CCM ', ''),
CipherSuite(0xc09d, 'RSA ', ' ', 'AES_256 ', 'CCM ', ''),
CipherSuite(0xc09e, 'DHE ', 'RSA ', 'AES_128 ', 'CCM ', ''),
CipherSuite(0xc09f, 'DHE ', 'RSA ', 'AES_256 ', 'CCM ', ''),
CipherSuite(0xc0a0, 'RSA ', ' ', 'AES_128 ', 'CCM_8', ''),
CipherSuite(0xc0a1, 'RSA ', ' ', 'AES_256 ', 'CCM_8', ''),
CipherSuite(0xc0a2, 'DHE ', 'RSA ', 'AES_128 ', 'CCM_8', ''),
CipherSuite(0xc0a3, 'DHE ', 'RSA ', 'AES_256 ', 'CCM_8', ''),
CipherSuite(0xc0a4, ' ', 'PSK ', 'AES_128 ', 'CCM ', ''),
CipherSuite(0xc0a5, ' ', 'PSK ', 'AES_256 ', 'CCM ', ''),
CipherSuite(0xc0a6, 'DHE ', 'PSK ', 'AES_128 ', 'CCM ', ''),
CipherSuite(0xc0a7, 'DHE ', 'PSK ', 'AES_256 ', 'CCM ', ''),
CipherSuite(0xc0a8, ' ', 'PSK ', 'AES_128 ', 'CCM_8', ''),
CipherSuite(0xc0a9, ' ', 'PSK ', 'AES_256 ', 'CCM_8', ''),
CipherSuite(0xc0aa, 'DHE ', 'PSK ', 'AES_128 ', 'CCM_8', ''),
CipherSuite(0xc0ab, 'DHE ', 'PSK ', 'AES_256 ', 'CCM_8', ''),
# RFC7251
CipherSuite(0xc0ac, 'ECDHE', 'ECDSA ', 'AES_128 ', 'CCM ', ''),
CipherSuite(0xc0ad, 'ECDHE', 'ECDSA ', 'AES_256 ', 'CCM ', ''),
CipherSuite(0xc0ae, 'ECDHE', 'ECDSA ', 'AES_128 ', 'CCM_8', ''),
CipherSuite(0xc0af, 'ECDHE', 'ECDSA ', 'AES_256 ', 'CCM_8', ''),
# Unassigned: 0xc0b0-0xcca7
CipherSuite(0xcc13, 'ECDHE', 'RSA ', 'CHACHA20', 'POLY1305', 'SHA256',
'OLD_TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256'),
CipherSuite(0xcc14, 'ECDHE', 'ECDSA ', 'CHACHA20', 'POLY1305', 'SHA256',
'OLD_TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256'),
CipherSuite(0xcc15, 'DHE ', 'RSA ', 'CHACHA20', 'POLY1305', 'SHA256',
'OLD_TLS_DHE_RSA_WITH_CHACHA20_POLY1305_SHA256'),
# RFC7905
CipherSuite(0xcca8, 'ECDHE', 'RSA ', 'CHACHA20', 'POLY1305', 'SHA256'),
CipherSuite(0xcca9, 'ECDHE', 'ECDSA ', 'CHACHA20', 'POLY1305', 'SHA256'),
CipherSuite(0xccaa, 'DHE ', 'RSA ', 'CHACHA20', 'POLY1305', 'SHA256'),
CipherSuite(0xccab, ' ', 'PSK ', 'CHACHA20', 'POLY1305', 'SHA256'),
CipherSuite(0xccac, 'ECDHE', 'PSK ', 'CHACHA20', 'POLY1305', 'SHA256'),
CipherSuite(0xccad, 'DHE ', 'PSK ', 'CHACHA20', 'POLY1305', 'SHA256'),
CipherSuite(0xccae, 'RSA ', 'PSK ', 'CHACHA20', 'POLY1305', 'SHA256'),
# RFC8701 // GREASE (Generate Random Extensions And Sustain Extensibility)
CipherSuite(0x0a0a, '', '', '', '', '', 'GREASE'),
CipherSuite(0x1a1a, '', '', '', '', '', 'GREASE'),
CipherSuite(0x2a2a, '', '', '', '', '', 'GREASE'),
CipherSuite(0x3a3a, '', '', '', '', '', 'GREASE'),
CipherSuite(0x4a4a, '', '', '', '', '', 'GREASE'),
CipherSuite(0x5a5a, '', '', '', '', '', 'GREASE'),
CipherSuite(0x6a6a, '', '', '', '', '', 'GREASE'),
CipherSuite(0x7a7a, '', '', '', '', '', 'GREASE'),
CipherSuite(0x8a8a, '', '', '', '', '', 'GREASE'),
CipherSuite(0x9a9a, '', '', '', '', '', 'GREASE'),
CipherSuite(0xaaaa, '', '', '', '', '', 'GREASE'),
CipherSuite(0xbaba, '', '', '', '', '', 'GREASE'),
CipherSuite(0xcaca, '', '', '', '', '', 'GREASE'),
CipherSuite(0xdada, '', '', '', '', '', 'GREASE'),
CipherSuite(0xeaea, '', '', '', '', '', 'GREASE'),
CipherSuite(0xfafa, '', '', '', '', '', 'GREASE'),
# Unassigned: 0xccaf-0xfefd
# Reserved: 0xfefe-0xffff
]
BY_CODE = dict(
(cipher.code, cipher) for cipher in CIPHERSUITES)
# BY_NAME is a function (rather than a precomputed module-level dict) to avoid artificially increased coverage
BY_NAME_DICT = None
def BY_NAME(name):
# We initialize the dictionary only on the first call
global BY_NAME_DICT
if BY_NAME_DICT is None:
BY_NAME_DICT = dict((suite.name, suite) for suite in CIPHERSUITES)
return BY_NAME_DICT[name]
NULL_SUITE = BY_CODE[0x0000]
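# A minimal usage sketch (added for illustration, not part of the module): look
# up a well-known suite by code and exercise the derived attributes documented
# in the CipherSuite class above.
def _example_ciphersuite_lookup():
    suite = BY_CODE[0xc02f]
    # the RFC name is generated from kx/auth/cipher/mode/mac unless set explicitly
    assert suite.name == 'TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256'
    assert BY_NAME(suite.name) is suite
    assert suite.pfs and suite.aead and not suite.anonymous
    assert suite.mac_size == 32 and suite.block_size == 16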
class TestCipherSuites(object):
def test_kx(self):
# A test from each RFC
assert (BY_CODE[0x0005].kx == 'RSA')
assert (BY_CODE[0x0021].kx == 'KRB5')
assert (BY_CODE[0x002d].kx == 'DHE')
assert (BY_CODE[0x0034].kx == 'DH')
assert (BY_CODE[0x003c].kx == 'RSA')
assert (BY_CODE[0x0042].kx == 'DH')
assert (BY_CODE[0x006a].kx == 'DHE')
assert (BY_CODE[0x0084].kx == 'RSA')
assert (BY_CODE[0x0091].kx == 'DHE')
assert (BY_CODE[0x0098].kx == 'DH')
assert (BY_CODE[0x00ab].kx == 'DHE')
assert (BY_CODE[0x00b0].kx == 'PSK')
assert (BY_CODE[0x00bb].kx == 'DH')
assert (BY_CODE[0xc008].kx == 'ECDHE')
assert (BY_CODE[0xc016].kx == 'ECDH')
assert (BY_CODE[0xc01d].kx == 'SRP_SHA')
assert (BY_CODE[0xc027].kx == 'ECDHE')
assert (BY_CODE[0xc036].kx == 'ECDHE')
assert (BY_CODE[0xc045].kx == 'DHE')
assert (BY_CODE[0xc052].kx == 'DHE')
assert (BY_CODE[0xc068].kx == 'RSA')
assert (BY_CODE[0xc074].kx == 'ECDH')
assert (BY_CODE[0xc08d].kx == 'ECDH')
assert (BY_CODE[0xc09d].kx == 'RSA')
assert (BY_CODE[0xc0a2].kx == 'DHE')
assert (BY_CODE[0xc0ad].kx == 'ECDHE')
assert (BY_CODE[0xcc13].kx == 'ECDHE')
assert (BY_CODE[0xcca8].kx == 'ECDHE')
assert (BY_CODE[0xccae].kx == 'RSA')
def test_auth(self):
# A test from each RFC
assert (BY_CODE[0x0005].auth == 'RSA')
assert (BY_CODE[0x0021].auth == 'KRB5')
assert (BY_CODE[0x002d].auth == 'PSK')
assert (BY_CODE[0x0034].auth == 'anon')
assert (BY_CODE[0x003c].auth == 'RSA')
assert (BY_CODE[0x0042].auth == 'DSS')
assert (BY_CODE[0x006a].auth == 'DSS')
assert (BY_CODE[0x0084].auth == 'RSA')
assert (BY_CODE[0x0091].auth == 'PSK')
assert (BY_CODE[0x0098].auth == 'RSA')
assert (BY_CODE[0x00ab].auth == 'PSK')
assert (BY_CODE[0x00b0].auth == 'PSK')
assert (BY_CODE[0x00bb].auth == 'DSS')
assert (BY_CODE[0xc008].auth == 'ECDSA')
assert (BY_CODE[0xc016].auth == 'anon')
assert (BY_CODE[0xc01d].auth == 'SRP_SHA')
assert (BY_CODE[0xc027].auth == 'RSA')
assert (BY_CODE[0xc036].auth == 'PSK')
assert (BY_CODE[0xc045].auth == 'RSA')
assert (BY_CODE[0xc052].auth == 'RSA')
assert (BY_CODE[0xc068].auth == 'PSK')
assert (BY_CODE[0xc074].auth == 'ECDSA')
assert (BY_CODE[0xc08d].auth == 'RSA')
assert (BY_CODE[0xc09d].auth == 'RSA')
assert (BY_CODE[0xc0a2].auth == 'RSA')
assert (BY_CODE[0xc0ad].auth == 'ECDSA')
assert (BY_CODE[0xcc14].auth == 'ECDSA')
assert (BY_CODE[0xcca8].auth == 'RSA')
assert (BY_CODE[0xccae].auth == 'PSK')
def test_pfs(self):
assert (BY_NAME('TLS_RSA_WITH_RC4_128_SHA').pfs is False)
assert (BY_NAME('TLS_DHE_DSS_WITH_AES_256_CBC_SHA256').pfs is True)
assert (BY_NAME('TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA').pfs is True)
def test_aead(self):
assert (BY_NAME('TLS_RSA_WITH_AES_128_CBC_SHA256').aead is False)
assert (BY_NAME('TLS_RSA_WITH_AES_256_CCM').aead is True)
assert (BY_NAME('TLS_DHE_RSA_WITH_AES_128_CCM_8').aead is True)
assert (BY_NAME('TLS_DHE_PSK_WITH_AES_256_GCM_SHA384').aead is True)
def test_anonymous(self):
assert (BY_NAME('TLS_RSA_WITH_RC4_128_SHA').anonymous is False)
assert (BY_NAME('TLS_DH_anon_WITH_AES_128_CBC_SHA').anonymous is True)
assert (BY_NAME('TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA').anonymous is True)
def test_by_name_and_code(self):
# Special cases:
# - explicit name
assert (BY_CODE[0x00ff] == BY_NAME('TLS_EMPTY_RENEGOTIATION_INFO'))
# - explicit encoding (DES_40 + CBC = DES_CBC_40)
assert (BY_CODE[0x0026] == BY_NAME('TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA'))
# A test from each RFC
assert (BY_CODE[0x0005] == BY_NAME('TLS_RSA_WITH_RC4_128_SHA'))
assert (BY_CODE[0x0021] == BY_NAME('TLS_KRB5_WITH_IDEA_CBC_SHA'))
assert (BY_CODE[0x002d] == BY_NAME('TLS_DHE_PSK_WITH_NULL_SHA'))
assert (BY_CODE[0x0034] == BY_NAME('TLS_DH_anon_WITH_AES_128_CBC_SHA'))
assert (BY_CODE[0x003c] == BY_NAME('TLS_RSA_WITH_AES_128_CBC_SHA256'))
assert (BY_CODE[0x0042] == BY_NAME('TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA'))
assert (BY_CODE[0x006a] == BY_NAME('TLS_DHE_DSS_WITH_AES_256_CBC_SHA256'))
assert (BY_CODE[0x0084] == BY_NAME('TLS_RSA_WITH_CAMELLIA_256_CBC_SHA'))
assert (BY_CODE[0x0091] == BY_NAME('TLS_DHE_PSK_WITH_AES_256_CBC_SHA'))
assert (BY_CODE[0x0098] == BY_NAME('TLS_DH_RSA_WITH_SEED_CBC_SHA'))
assert (BY_CODE[0x00ab] == BY_NAME('TLS_DHE_PSK_WITH_AES_256_GCM_SHA384'))
assert (BY_CODE[0x00b0] == BY_NAME('TLS_PSK_WITH_NULL_SHA256'))
assert (BY_CODE[0x00bb] == BY_NAME('TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256'))
assert (BY_CODE[0x1303] == BY_NAME('TLS_CHACHA20_POLY1305_SHA256'))
assert (BY_CODE[0xc008] == BY_NAME('TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA'))
assert (BY_CODE[0xc016] == BY_NAME('TLS_ECDH_anon_WITH_RC4_128_SHA'))
assert (BY_CODE[0xc01d] == BY_NAME('TLS_SRP_SHA_WITH_AES_128_CBC_SHA'))
assert (BY_CODE[0xc027] == BY_NAME('TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256'))
assert (BY_CODE[0xc036] == BY_NAME('TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA'))
assert (BY_CODE[0xc045] == BY_NAME('TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384'))
assert (BY_CODE[0xc052] == BY_NAME('TLS_DHE_RSA_WITH_ARIA_128_GCM_SHA256'))
assert (BY_CODE[0xc068] == BY_NAME('TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256'))
assert (BY_CODE[0xc074] == BY_NAME('TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256'))
assert (BY_CODE[0xc08d] == BY_NAME('TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384'))
assert (BY_CODE[0xc09d] == BY_NAME('TLS_RSA_WITH_AES_256_CCM'))
assert (BY_CODE[0xc0a2] == BY_NAME('TLS_DHE_RSA_WITH_AES_128_CCM_8'))
assert (BY_CODE[0xc0ad] == BY_NAME('TLS_ECDHE_ECDSA_WITH_AES_256_CCM'))
assert (BY_CODE[0xcca8] == BY_NAME('TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256'))
assert (BY_CODE[0xccae] == BY_NAME('TLS_RSA_PSK_WITH_CHACHA20_POLY1305_SHA256'))
assert (BY_CODE[0xcc15] == BY_NAME('OLD_TLS_DHE_RSA_WITH_CHACHA20_POLY1305_SHA256'))
def test_repr(self):
cs = CipherSuite(0x0009, 'RSA', ' ', 'DES ', 'CBC ', 'SHA')
assert repr(cs) == "CipherSuite(0x0009, TLS_RSA_WITH_DES_CBC_SHA)"
assert cs.mac_size == 20
assert cs.block_size == 8
        assert repr(BY_CODE[0x6a6a]) == "CipherSuite(0x6a6a, GREASE)"
| 38,198 | 51.042234 | 115 |
py
|
dpkt
|
dpkt-master/dpkt/radiotap.py
|
# -*- coding: utf-8 -*-
"""Radiotap"""
from __future__ import print_function
from __future__ import absolute_import
from . import dpkt
from . import ieee80211
from .compat import compat_ord
# Ref: http://www.radiotap.org
# Fields Ref: http://www.radiotap.org/defined-fields/all
# Present flags
_TSFT_SHIFT = 0
_FLAGS_SHIFT = 1
_RATE_SHIFT = 2
_CHANNEL_SHIFT = 3
_FHSS_SHIFT = 4
_ANT_SIG_SHIFT = 5
_ANT_NOISE_SHIFT = 6
_LOCK_QUAL_SHIFT = 7
_TX_ATTN_SHIFT = 8
_DB_TX_ATTN_SHIFT = 9
_DBM_TX_POWER_SHIFT = 10
_ANTENNA_SHIFT = 11
_DB_ANT_SIG_SHIFT = 12
_DB_ANT_NOISE_SHIFT = 13
_RX_FLAGS_SHIFT = 14
_CHANNELPLUS_SHIFT = 18
_EXT_SHIFT = 31
# Flags elements
_FLAGS_SIZE = 2
_CFP_FLAG_SHIFT = 0
_PREAMBLE_SHIFT = 1
_WEP_SHIFT = 2
_FRAG_SHIFT = 3
_FCS_SHIFT = 4
_DATA_PAD_SHIFT = 5
_BAD_FCS_SHIFT = 6
_SHORT_GI_SHIFT = 7
# Channel type
_CHAN_TYPE_SIZE = 4
_CHANNEL_TYPE_SHIFT = 4
_CCK_SHIFT = 5
_OFDM_SHIFT = 6
_TWO_GHZ_SHIFT = 7
_FIVE_GHZ_SHIFT = 8
_PASSIVE_SHIFT = 9
_DYN_CCK_OFDM_SHIFT = 10
_GFSK_SHIFT = 11
_GSM_SHIFT = 12
_STATIC_TURBO_SHIFT = 13
_HALF_RATE_SHIFT = 14
_QUARTER_RATE_SHIFT = 15
# Flags offsets and masks
_FCS_MASK = 0x10
class Radiotap(dpkt.Packet):
"""Radiotap.
Attributes:
__hdr__: Header fields of Radiotap.
version: (int): Version (1 byte)
pad: (int): Padding (1 byte)
length: (int): Length (2 bytes)
"""
__hdr__ = (
('version', 'B', 0),
('pad', 'B', 0),
('length', 'H', 0),
)
__byte_order__ = '<'
def _is_present(self, bit):
index = bit // 8
mask = 1 << (bit % 8)
return 1 if self.present_flags[index] & mask else 0
def _set_bit(self, bit, val):
# present_flags is a bytearray; this selects the byte that holds the bit
index = bit // 8
# mask covers every bit except the one being set
mask = ~(1 << (bit % 8) & 0xff)
# keep all the other bits, then OR in `val` at the target position;
# because the mask clears the target bit, a zero `val` leaves it cleared
self.present_flags[index] = (self.present_flags[index] & mask) | (val << bit % 8)
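# Worked example (a sketch, not part of the original module): for the XChannel
# presence bit, _CHANNELPLUS_SHIFT = 18, so index = 18 // 8 = 2 and
# mask = 1 << (18 % 8) = 0x04, i.e. bit 2 of the third presence byte.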
@property
def tsft_present(self):
return self._is_present(_TSFT_SHIFT)
@tsft_present.setter
def tsft_present(self, val):
self._set_bit(_TSFT_SHIFT, val)
@property
def flags_present(self):
return self._is_present(_FLAGS_SHIFT)
@flags_present.setter
def flags_present(self, val):
self._set_bit(_FLAGS_SHIFT, val)
@property
def rate_present(self):
return self._is_present(_RATE_SHIFT)
@rate_present.setter
def rate_present(self, val):
self._set_bit(_RATE_SHIFT, val)
@property
def channel_present(self):
return self._is_present(_CHANNEL_SHIFT)
@channel_present.setter
def channel_present(self, val):
self._set_bit(_CHANNEL_SHIFT, val)
@property
def fhss_present(self):
return self._is_present(_FHSS_SHIFT)
@fhss_present.setter
def fhss_present(self, val):
self._set_bit(_FHSS_SHIFT, val)
@property
def ant_sig_present(self):
return self._is_present(_ANT_SIG_SHIFT)
@ant_sig_present.setter
def ant_sig_present(self, val):
self._set_bit(_ANT_SIG_SHIFT, val)
@property
def ant_noise_present(self):
return self._is_present(_ANT_NOISE_SHIFT)
@ant_noise_present.setter
def ant_noise_present(self, val):
self._set_bit(_ANT_NOISE_SHIFT, val)
@property
def lock_qual_present(self):
return self._is_present(_LOCK_QUAL_SHIFT)
@lock_qual_present.setter
def lock_qual_present(self, val):
self._set_bit(_LOCK_QUAL_SHIFT, val)
@property
def tx_attn_present(self):
return self._is_present(_TX_ATTN_SHIFT)
@tx_attn_present.setter
def tx_attn_present(self, val):
self._set_bit(_TX_ATTN_SHIFT, val)
@property
def db_tx_attn_present(self):
return self._is_present(_DB_TX_ATTN_SHIFT)
@db_tx_attn_present.setter
def db_tx_attn_present(self, val):
self._set_bit(_DB_TX_ATTN_SHIFT, val)
@property
def dbm_tx_power_present(self):
return self._is_present(_DBM_TX_POWER_SHIFT)
@dbm_tx_power_present.setter
def dbm_tx_power_present(self, val):
self._set_bit(_DBM_TX_POWER_SHIFT, val)
@property
def ant_present(self):
return self._is_present(_ANTENNA_SHIFT)
@ant_present.setter
def ant_present(self, val):
self._set_bit(_ANTENNA_SHIFT, val)
@property
def db_ant_sig_present(self):
return self._is_present(_DB_ANT_SIG_SHIFT)
@db_ant_sig_present.setter
def db_ant_sig_present(self, val):
self._set_bit(_DB_ANT_SIG_SHIFT, val)
@property
def db_ant_noise_present(self):
return self._is_present(_DB_ANT_NOISE_SHIFT)
@db_ant_noise_present.setter
def db_ant_noise_present(self, val):
self._set_bit(_DB_ANT_NOISE_SHIFT, val)
@property
def rx_flags_present(self):
return self._is_present(_RX_FLAGS_SHIFT)
@rx_flags_present.setter
def rx_flags_present(self, val):
self._set_bit(_RX_FLAGS_SHIFT, val)
@property
def chanplus_present(self):
return self._is_present(_CHANNELPLUS_SHIFT)
@chanplus_present.setter
def chanplus_present(self, val):
self._set_bit(_CHANNELPLUS_SHIFT, val)
def unpack(self, buf):
dpkt.Packet.unpack(self, buf)
self.data = buf[self.length:]
self.fields = []
buf = buf[self.__hdr_len__:]
self.present_flags = bytearray(buf[:4])
buf = buf[4:]
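# Bit 31 (_EXT_SHIFT) of each 32-bit presence word signals that another
# presence word follows, so present_flags may grow beyond the initial 4 bytes.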
ext_bit = _EXT_SHIFT
while self._is_present(ext_bit):
self.present_flags += bytearray(buf[:4])
buf = buf[4:]
ext_bit += 32
# decode each field into self.<name> (e.g. self.tsft) and also append it to the self.fields list
field_decoder = [
('tsft', self.tsft_present, self.TSFT),
('flags', self.flags_present, self.Flags),
('rate', self.rate_present, self.Rate),
('channel', self.channel_present, self.Channel),
('fhss', self.fhss_present, self.FHSS),
('ant_sig', self.ant_sig_present, self.AntennaSignal),
('ant_noise', self.ant_noise_present, self.AntennaNoise),
('lock_qual', self.lock_qual_present, self.LockQuality),
('tx_attn', self.tx_attn_present, self.TxAttenuation),
('db_tx_attn', self.db_tx_attn_present, self.DbTxAttenuation),
('dbm_tx_power', self.dbm_tx_power_present, self.DbmTxPower),
('ant', self.ant_present, self.Antenna),
('db_ant_sig', self.db_ant_sig_present, self.DbAntennaSignal),
('db_ant_noise', self.db_ant_noise_present, self.DbAntennaNoise),
('rx_flags', self.rx_flags_present, self.RxFlags),
('chanplus', self.chanplus_present, self.ChannelPlus)
]
offset = self.__hdr_len__ + len(self.present_flags)
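# Each field class declares an __alignment__; padding bytes are skipped so the
# field starts on a multiple of that alignment relative to the start of the
# radiotap header (e.g. TSFT is 8-byte aligned, Channel is 2-byte aligned).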
for name, present_bit, parser in field_decoder:
if present_bit:
ali = parser.__alignment__
if ali > 1 and offset % ali:
padding = ali - offset % ali
buf = buf[padding:]
offset += padding
field = parser(buf)
field.data = b''
setattr(self, name, field)
self.fields.append(field)
buf = buf[len(field):]
offset += len(field)
if len(self.data) > 0:
if self.flags_present and self.flags.fcs:
self.data = ieee80211.IEEE80211(self.data, fcs=self.flags.fcs)
else:
self.data = ieee80211.IEEE80211(self.data)
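# Usage sketch (hypothetical `buf` holding a radiotap-encapsulated frame, e.g.
# read from a pcap with the IEEE802_11_RADIO link type):
#   rt = Radiotap(buf)
#   if rt.ant_sig_present:
#       print(rt.ant_sig.db)
#   dot11 = rt.data  # an ieee80211.IEEE80211 instance when a payload is present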
class RadiotapField(dpkt.Packet):
__alignment__ = 1
__byte_order__ = '<'
class Antenna(RadiotapField):
__hdr__ = (
('index', 'B', 0),
)
class AntennaNoise(RadiotapField):
__hdr__ = (
('db', 'b', 0),
)
class AntennaSignal(RadiotapField):
__hdr__ = (
('db', 'b', 0),
)
class Channel(RadiotapField):
__alignment__ = 2
__hdr__ = (
('freq', 'H', 0),
('flags', 'H', 0),
)
class FHSS(RadiotapField):
__hdr__ = (
('set', 'B', 0),
('pattern', 'B', 0),
)
class Flags(RadiotapField):
__hdr__ = (
('val', 'B', 0),
)
@property
def fcs(self):
return (self.val & _FCS_MASK) >> _FCS_SHIFT
@fcs.setter
def fcs(self, v):
# preserve the other flag bits while setting or clearing the FCS bit
self.val = (v << _FCS_SHIFT) | (self.val & ~_FCS_MASK)
class LockQuality(RadiotapField):
__alignment__ = 2
__hdr__ = (
('val', 'H', 0),
)
class RxFlags(RadiotapField):
__alignment__ = 2
__hdr__ = (
('val', 'H', 0),
)
class Rate(RadiotapField):
__hdr__ = (
('val', 'B', 0),
)
class TSFT(RadiotapField):
__alignment__ = 8
__hdr__ = (
('usecs', 'Q', 0),
)
class TxAttenuation(RadiotapField):
__alignment__ = 2
__hdr__ = (
('val', 'H', 0),
)
class DbTxAttenuation(RadiotapField):
__alignment__ = 2
__hdr__ = (
('db', 'H', 0),
)
class DbAntennaNoise(RadiotapField):
__hdr__ = (
('db', 'B', 0),
)
class DbAntennaSignal(RadiotapField):
__hdr__ = (
('db', 'B', 0),
)
class DbmTxPower(RadiotapField):
__hdr__ = (
('dbm', 'B', 0),
)
class ChannelPlus(RadiotapField):
__alignment__ = 4
__hdr__ = (
('flags', 'I', 0),
('freq', 'H', 0),
('channel', 'B', 0),
('maxpower', 'B', 0),
)
def test_radiotap_1():
s = b'\x00\x00\x00\x18\x6e\x48\x00\x00\x00\x02\x6c\x09\xa0\x00\xa8\x81\x02\x00\x00\x00\x00\x00\x00\x00'
rad = Radiotap(s)
assert(rad.version == 0)
assert(rad.present_flags == b'\x6e\x48\x00\x00')
assert(rad.tsft_present == 0)
assert(rad.flags_present == 1)
assert(rad.rate_present == 1)
assert(rad.channel_present == 1)
assert(rad.fhss_present == 0)
assert(rad.ant_sig_present == 1)
assert(rad.ant_noise_present == 1)
assert(rad.lock_qual_present == 0)
assert(rad.db_tx_attn_present == 0)
assert(rad.dbm_tx_power_present == 0)
assert(rad.ant_present == 1)
assert(rad.db_ant_sig_present == 0)
assert(rad.db_ant_noise_present == 0)
assert(rad.rx_flags_present == 1)
assert(rad.channel.freq == 0x096c)
assert(rad.channel.flags == 0xa0)
assert(len(rad.fields) == 7)
def test_radiotap_2():
s = (b'\x00\x00\x30\x00\x2f\x40\x00\xa0\x20\x08\x00\xa0\x20\x08\x00\xa0\x20\x08\x00\x00\x00\x00'
b'\x00\x00\x08\x84\xbd\xac\x28\x00\x00\x00\x10\x02\x85\x09\xa0\x00\xa5\x00\x00\x00\xa1\x00'
b'\x9f\x01\xa1\x02')
rad = Radiotap(s)
assert(rad.version == 0)
assert(rad.present_flags == b'\x2f\x40\x00\xa0\x20\x08\x00\xa0\x20\x08\x00\xa0\x20\x08\x00\x00')
assert(rad.tsft_present)
assert(rad.flags_present)
assert(rad.rate_present)
assert(rad.channel_present)
assert(not rad.fhss_present)
assert(rad.ant_sig_present)
assert(not rad.ant_noise_present)
assert(not rad.lock_qual_present)
assert(not rad.db_tx_attn_present)
assert(not rad.dbm_tx_power_present)
assert(not rad.ant_present)
assert(not rad.db_ant_sig_present)
assert(not rad.db_ant_noise_present)
assert(rad.rx_flags_present)
assert(rad.channel.freq == 2437)
assert(rad.channel.flags == 0x00a0)
assert(len(rad.fields) == 6)
assert(rad.flags_present)
assert(rad.flags.fcs)
def test_fcs():
s = b'\x00\x00\x1a\x00\x2f\x48\x00\x00\x34\x8f\x71\x09\x00\x00\x00\x00\x10\x0c\x85\x09\xc0\x00\xcc\x01\x00\x00'
rt = Radiotap(s)
assert(rt.flags_present == 1)
assert(rt.flags.fcs == 1)
def test_radiotap_3(): # xchannel aka channel plus field
s = (
b'\x00\x00\x20\x00\x67\x08\x04\x00\x84\x84\x66\x25\x00\x00\x00\x00\x22\x0c\xd6\xa0\x01\x00\x00\x00\x40'
b'\x01\x00\x00\x3c\x14\x24\x11\x08\x02\x00\x00\xff\xff\xff\xff\xff\xff\x06\x03\x7f\x07\xa0\x16\x00\x19'
b'\xe3\xd3\x53\x52\x00\x8e\xaa\xaa\x03\x00\x00\x00\x08\x06\x00\x01\x08\x00\x06\x04\x00\x01\x00\x19\xe3'
b'\xd3\x53\x52\xa9\xfe\xf7\x00\x00\x00\x00\x00\x00\x00\x4f\x67\x32\x38'
)
rt = Radiotap(s)
assert rt.ant_noise.db == -96
assert rt.ant_sig.db == -42
assert rt.ant.index == 1
assert rt.chanplus_present
assert rt.chanplus.flags == 0x140
assert rt.chanplus.freq == 5180
assert rt.chanplus.channel == 36
assert rt.chanplus.maxpower == 17
assert len(rt.fields) == 7
assert repr(rt.data).startswith('IEEE80211')
def test_radiotap_properties():
from binascii import unhexlify
buf = unhexlify(
'00'
'00'
'0018'
'0000000000000000000000000000000000000000'
)
radiotap = Radiotap(buf)
property_keys = [
'tsft', 'flags', 'rate', 'channel', 'fhss', 'ant_sig', 'ant_noise',
'lock_qual', 'tx_attn', 'db_tx_attn', 'dbm_tx_power', 'ant',
'db_ant_sig', 'db_ant_noise', 'rx_flags', 'chanplus'
]
for prop in [key + '_present' for key in property_keys]:
print(prop)
assert hasattr(radiotap, prop)
assert getattr(radiotap, prop) == 0
setattr(radiotap, prop, 1)
assert getattr(radiotap, prop) == 1
setattr(radiotap, prop, 0)
assert getattr(radiotap, prop) == 0
def test_radiotap_unpack_fcs():
from binascii import unhexlify
buf = unhexlify(
'00' # version
'00' # pad
'1800' # length
'6e48000011026c09a000a8810200000000000000'
'd40000000012f0b61ca4ffffffff'
)
radiotap = Radiotap(buf)
assert radiotap.data.fcs_present == 1
def test_flags():
flags = Radiotap.Flags(b'\x00')
assert flags.fcs == 0
flags.fcs = 1
assert flags.fcs == 1
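# Convenience test runner (not part of the original file): a minimal sketch for
# invoking the module's self-tests directly, assuming it is run as a script.
if __name__ == '__main__':
    test_radiotap_1()
    test_radiotap_2()
    test_fcs()
    test_radiotap_3()
    test_radiotap_properties()
    test_radiotap_unpack_fcs()
    test_flags()
    print('Tests Successful...')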
| 14,279 | 27.05501 | 115 |
py
|