def wrap_fhir_resource(
entry: dict,
patient_time: Union[str, datetime.datetime],
environment_id: Optional[Union[str, int]] = None,
interactions: Optional[List[str]] = None,
simulation_step: Optional[int] = None,
tag: Optional[str] = None,
raw_fhir_resource: Optional[dict] = None,
) -> dict:
"""Wraps a minimal record 'entry' into a dictionary that
can be used in the PatientRecordEntry constructor.
Parameters
----------
entry : dict
Entry to be wrapped, written in the internal patient record
representation. Must at least have "name", "start" and "resource_type"
keys. Please refer to the notebook 'patient-agent.ipynb' in the
notebooks folder for an example.
patient_time : Union[str, datetime.datetime]
The timestamp in patient-time attached to the entry
environment_id : Optional[Union[str, int]], optional
If the entry was the result of interacting with an environment, this
is the environment_id, by default None
interactions : Optional[List[str]], optional
If the entry was the result of interacting with an environment, this
is the list of interactions that were used in that process,
by default None
simulation_step : Optional[int], optional
If this entry was generated in a simulation, this is the simulation
step that produced the entry (useful if, say, a simulation step
generated multiple entries), by default None
tag : Optional[str], optional
A human-readable name for the entry, by default None
raw_fhir_resource : Optional[dict], optional
Additional raw FHIR data that can be added to the entry when
converting to FHIR, by default None
Returns
-------
dict
A wrapped entry
"""
return {
"patient_time": string_to_datetime(patient_time),
"environment_id": environment_id,
"interactions": interactions,
"simulation_step": simulation_step,
"fhir_resource_time": string_to_datetime(entry.get("start")),
"fhir_resource_type": entry["resource_type"],
"fhir_resource": copy.deepcopy(raw_fhir_resource),
"entry": copy.deepcopy(entry),
"tag": tag,
}
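# A minimal usage sketch of wrap_fhir_resource; the entry fields and
# identifiers below are illustrative assumptions, not values from the source.
entry = {
    "name": "blood pressure measurement",
    "start": "2021-03-01T09:00:00",
    "resource_type": "Observation",
}
wrapped = wrap_fhir_resource(
    entry,
    patient_time="2021-03-01T09:00:00",
    environment_id="gp_practice_0",
    interactions=["measure_blood_pressure"],
    simulation_step=3,
    tag="bp_check",
)
# wrapped["patient_time"] and wrapped["fhir_resource_time"] are now
# timezone-aware datetimes, and wrapped["entry"] is a deep copy of entry.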
def update(
self,
record: List[dict],
skip_existing: bool = False,
logger: Optional[logging.Logger] = None,
print_: bool = False,
wrapped: bool = True,
) -> None:
"""Update patient record by appending new 'record' entries and perform
validation (deduplication and time order checking) and
subsequently updating conditions, medications and actions numbers.
The record contains 'entries' which each get converted into
PatientRecordEntry objects. To facilitate this, they may need to be
wrapped by wrap_fhir_resource. An unwrapped entry (or equivalently,
the 'entry' field of a wrapped entry)
must at least have the following fields: "name", "start" and
"resource_type". Please refer to the notebook 'patient-agent.ipynb'
in the notebooks folder for an example.
Parameters
----------
record : List[dict]
The new list of entries to append to existing patient record
skip_existing : bool, optional
Whether to skip the addition of new record entries if they are
already present in the existing patient record (deduplication),
by default False
logger : logging.Logger, optional
Logger to write validation messages to, by default None
print_ : bool, optional
Whether to also print validation messages, by default False
wrapped : bool, optional
Whether each entry in record should be wrapped using
wrap_fhir_resource, by default True
"""
self._update_record(record, skip_existing, logger, print_, wrapped)
for attrs_name in self._dataframe_attributes_to_kwargs:
self._update_dataframe_attributes(
attrs_name, self.record, getattr(self, attrs_name)
)
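# A sketch of calling update; the patient instance and entry values are
# hypothetical, and wrapped=True (the default) is assumed to mean that
# update passes each raw entry through wrap_fhir_resource internally.
patient.update(
    [{
        "name": "flu vaccination",
        "start": "2021-10-01T10:00:00",
        "resource_type": "Immunization",
    }],
    skip_existing=True,
    print_=True,
)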
def from_fhir(
cls,
fhir_data: Union[dict, str, Path],
resource_type: str = "Bundle",
patient_id: Optional[Union[int, str]] = None,
start_time: Optional[Union[str, datetime.datetime]] = None,
**kwargs,
) -> PatientAgentType:
"""Load patient from FHIR resource data.
The data can either be a Patient resource, or a Bundle resource
containing a Patient resource. The Patient resource is used to
populate the patient profile, and (if a Bundle) any other resources
populate the patient record.
Parameters
----------
fhir_data : Union[dict, str, Path]
A dictionary containing FHIR data, or a path to FHIR data
resource_type : str, optional
The resource type of the FHIR data, by default "Bundle"
patient_id : Optional[Union[int, str]], optional
Patient ID, if not set then is taken from the
FHIR data Patient resource, by default None
start_time : Optional[Union[str, datetime.datetime]], optional
Start time in patient perspective, automatically set to
datetime.datetime.now() if not supplied, by default None
Returns
-------
PatientAgent
An instance of the PatientAgent class loaded from fhir_data.
Raises
------
ValueError
If more than one Patient resource is detected in fhir_data
KeyError
If any key in input kwargs has prefix patient__. This is not
allowed here as all patient resource data should be contained in
fhir_data
ValueError
If a non-None patient_id is passed as an argument, then a
consistency check is made against the patient resource 'id' field
in fhir_data. An error is raised if there is a mismatch
"""
if start_time is None:
start_time = datetime.datetime.now(datetime.timezone.utc)
created_at = start_time
else:
created_at = None
if isinstance(fhir_data, str) or isinstance(fhir_data, Path):
data = FHIRHandler().load(
fhir_data,
input_resource_type=resource_type,
output_resource_type=resource_type,
)
else:
data = fhir_data
if isinstance(data, dict):
if resource_type == "Bundle":
data = [item["resource"] for item in data["entry"]]
else:
data = [data]
try:
[patient_resource] = [
item for item in data if item["resourceType"] == "Patient"
]
except ValueError:
raise ValueError("There must be one patient resource")
record_unwrapped = [
convert_fhir_to_patient_record_entry(
item,
)
for item in data
if item["resourceType"] != "Patient"
]
record = [
wrap_fhir_resource(
item,
patient_time=item["start"]
if item.get("end") is None
else item["end"],
)
for item in record_unwrapped
]
patient_kwargs = {
f"patient__{key}": value
for key, value in patient_resource.items()
if key not in ["resourceType", "id", "gender", "birthDate"]
}
for key in kwargs:
if key.startswith("patient__"):
raise KeyError(
"The keys of the kwargs supplied to the `from_fhir` "
"method should not start with 'patient_'. This prefix is "
"reserved for FHIR patient resource field names. "
"If you want to add a field to the FHIR patient resource, "
"then this must be included in `fhir_path`. Other "
"PatientAgent attributes may be added through `kwargs`, "
"but their attribute names cannot start with 'patient_'."
)
kwargs = {**patient_kwargs, **kwargs}
if patient_id is not None:
if patient_id != patient_resource["id"]:
raise ValueError(
f"patient_id={patient_id} does not match value"
f"{patient_resource['id']} from the input FHIR data."
)
return cls(
patient_id=(
patient_resource["id"] if patient_id is None else patient_id
),
gender=patient_resource["gender"],
birth_date=patient_resource["birthDate"],
record=record,
start_time=start_time,
created_at=created_at,
**kwargs,
)
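# Usage sketches for from_fhir; the file path and resource values below
# are hypothetical.
patient = PatientAgent.from_fhir("data/patient_bundle.json")  # Bundle on disk
patient_resource_dict = {
    "resourceType": "Patient",
    "id": "0",
    "gender": "female",
    "birthDate": "1985-02-01",
}
patient = PatientAgent.from_fhir(patient_resource_dict, resource_type="Patient")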
def string_to_datetime(
dt: Optional[Union[str, datetime.datetime]],
format_str: Optional[str] = None,
tzinfo: datetime.timezone = datetime.timezone.utc,
) -> datetime.datetime:
"""Convert datetime-like string as datetime object. The default settings
uses dateutil to automatically parse the string, and returns
a datetime object with timezone datetime.timezone.utc
Parameters
----------
dt : Optional[Union[str, datetime.datetime]]
Datetime string. If dt is already a datetime object or None, returns
dt
format_str : Optional[str], optional
Input format string, by default None
tzinfo : datetime.timezone, optional
Timezone, by default datetime.timezone.utc
Returns
-------
datetime.datetime
Returns dt as a datetime object
"""
if dt is None or isinstance(dt, datetime.datetime):
return dt
if format_str is None:
return dateutil.parser.parse(dt).astimezone(tzinfo)
else:
return datetime.datetime.strptime(dt, format_str).replace(tzinfo=tzinfo)
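# Both parsing paths in brief (illustrative values):
string_to_datetime("2021-03-01T09:00:00+01:00")
# -> datetime.datetime(2021, 3, 1, 8, 0, tzinfo=datetime.timezone.utc)
string_to_datetime("01/03/2021", format_str="%d/%m/%Y")
# -> datetime.datetime(2021, 3, 1, 0, 0, tzinfo=datetime.timezone.utc)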
def print_log(
msg: str, print_: bool = False, logger: Optional[logging.Logger] = None
) -> None:
"""Print and / or log a message
Parameters
----------
msg : str
Message string
print_ : bool, optional
Whether to print the message, by default False
logger : logging.Logger, optional
Logger that the message will be written to, if supplied, by default None
"""
if logger is None or print_:
print(msg)
if logger is not None:
logger.info(msg)
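# A quick illustration of the three output modes:
import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("simulation")
print_log("no logger supplied")                # printed only
print_log("logger only", logger=logger)        # logged only
print_log("both", print_=True, logger=logger)  # printed and logged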
def an_interaction_0(
patient: PatientAgent,
environment: EnvironmentAgent,
patient_time: datetime.datetime,
):
"""Interaction between patient and environment:
- generates new Patient record entries;
- decides which Environment the patient should visit next and at what time;
- optionally applies custom updates to the Patient and Environment agents.
Parameters
----------
patient : PatientAgent
The patient agent
environment : EnvironmentAgent
The environment agent
patient_time : datetime.datetime
The time from the patient's perspective
Returns
-------
patient : PatientAgent
Patient agent, possibly updated
environment : EnvironmentAgent
The environment agent, possibly updated
update_data : Dict[str, List[dict]]
Dictionary containing new patient record entries as
{"new_patient_record_entries": [entry_0, entry_1,...]},
where each entry is itself a dictionary.
These will get automatically added to the patient record.
In future, update_data could contain other data too, which is why it is
structured in this way.
next_environment_id_to_prob : Dict[Union[str, int], float]
Dictionary containing next environment_ids as keys and the probabilities
to transition to them
next_environment_id_to_time : Dict[Union[str, int], datetime.timedelta]
Dictionary containing next environment_ids as keys and the time period
from the initial patient_time to transition to them
"""
# ADD CODE
# Makes new patient record entries, decides next environment transition
# probability and time. Can update patient and environment
# Note that the intelligence layer, which receives the outputs of the
# interaction functions, must be able to handle cases where
# next_environment_id_to_prob and next_environment_id_to_time are empty
# (these are the outputs of the default death interaction)
return (
patient,
environment,
update_data,
next_environment_id_to_prob,
next_environment_id_to_time,
)
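# To make the template concrete: a minimal interaction following the
# contract above. The record fields, environment id and timing are
# illustrative assumptions, not the library's own logic.
def measure_blood_pressure_interaction(
    patient: PatientAgent,
    environment: EnvironmentAgent,
    patient_time: datetime.datetime,
):
    new_entry = {
        "name": "blood pressure measurement",
        "start": patient_time.isoformat(),
        "resource_type": "Observation",
    }
    update_data = {"new_patient_record_entries": [new_entry]}
    # Schedule a certain return visit to environment 0 in 7 days
    next_environment_id_to_prob = {0: 1.0}
    next_environment_id_to_time = {0: datetime.timedelta(days=7)}
    return (
        patient,
        environment,
        update_data,
        next_environment_id_to_prob,
        next_environment_id_to_time,
    )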
def intelligence(
patient: PatientAgent,
environment: EnvironmentAgent,
patient_time: datetime.datetime,
interaction_mapper: Dict[str, Callable],
):
"""Intelligence layer - decides which interaction(s) to apply to patient
and environment.
Parameters
----------
patient : PatientAgent
The patient agent
environment : EnvironmentAgent
The environment agent
patient_time : datetime.datetime
The time from the patient's perspective
interaction_mapper : Dict[str, Callable]
Dictionary mapping interaction names to corresponding function
handles
Returns
-------
patient : PatientAgent
Patient agent, possibly updated
environment : EnvironmentAgent
The environment agent, possibly updated
patient_time : datetime.datetime
The time from the patient's perspective for the next interaction
update_data : Dict[str, List[dict]]
Dictionary containing new patient record entries as
{"new_patient_record_entries": [entry_0, entry_1,...]},
where each entry is itself a dictionary.
These will get automatically added to the patient record.
In future, update_data could contain other data too, which is why it is
structured in this way.
next_environment_id : Union[int, str]
environment_id of the next environment the patient should visit,
sampled from next_environment_id_to_prob
interaction_names : List[str]
Names of the interactions that were applied here.
next_environment_id_to_prob : Dict[Union[str, int], float]
Dictionary containing next environment_ids as keys and the probabilities
to transition to them
next_environment_id_to_time : Dict[Union[str, int], datetime.timedelta]
Dictionary containing next environment_ids as keys and the time period
from the initial patient_time to transition to them
"""
# ADD CODE
# < some logic that decides which
# interaction function to apply to the patient and environment,
# and samples from next_environment_id_to_prob >
return (
patient,
environment,
patient_time,
update_data,
next_environment_id,
interaction_names,
next_environment_id_to_prob,
next_environment_id_to_time,
)
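# Inside an intelligence implementation, the next environment could be
# sampled from an interaction's transition probabilities along these
# lines (a sketch, not the library's own logic):
import random

def sample_next_environment(next_environment_id_to_prob):
    # Returns None when the mapping is empty (e.g. after the default
    # death interaction), which callers must handle
    if not next_environment_id_to_prob:
        return None
    ids = list(next_environment_id_to_prob)
    weights = [next_environment_id_to_prob[i] for i in ids]
    return random.choices(ids, weights=weights, k=1)[0]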
def convert_path(function: Callable):
"""
Decorator function to convert path to pathlib.Path
"""
@wraps(function)
def wrapper(self, path: Union[Path, str], *args, **kwargs):
if isinstance(path, Path):
pass
elif isinstance(path, str):
path = Path(path)
else:
raise ValueError("`path` must be string or pathlib.Path type")
return function(self, path, *args, **kwargs)
return wrapper
def make_dir(function: Callable):
"""
Decorator function to create the directory for path (or its parent directory, if path has a file suffix)
"""
@wraps(function)
def wrapper(self, path: Union[Path, str], *args, **kwargs):
if path.suffix == "":
path.mkdir(exist_ok=True, parents=True)
else:
path.parent.mkdir(exist_ok=True, parents=True)
return function(self, path, *args, **kwargs)
return wrapper
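# The two decorators are designed to stack. Since make_dir reads
# path.suffix, convert_path must be the outer decorator so that strings
# are converted first. The Writer class below is hypothetical.
class Writer:
    @convert_path
    @make_dir
    def save_text(self, path: Path, text: str) -> None:
        # Here `path` is a pathlib.Path and its directory exists
        path.write_text(text)

Writer().save_text("outputs/run_1/log.txt", "hello")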
def parse_config(config: Union[dict, str, Path]) -> dict:
"""Parse and validate config file
Parameters
----------
config : Union[dict, str, Path]
Config data, either loaded as dictionary or path to file
Returns
-------
dict
Returns config if it passes validation, otherwise an error is raised
"""
if isinstance(config, str) or isinstance(config, Path):
config = DataHandler().load_json(config)
for agents in ["patients", "environments"]:
if isinstance(config[agents], str):
config[agents] = _parse_agents_config_from_file(
config[agents], agents
)
validate_unique_environment_ids(config["environments"], "environment_id")
Config(**config)
return config
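# A sketch of a config that would flow through parse_config. The exact
# fields accepted by the Config model are not shown in the source, so
# the agent fields below are illustrative assumptions.
config = {
    "patients": [
        {"patient_id": "0", "gender": "female", "birth_date": "1985-02-01"},
    ],
    "environments": [
        {"environment_id": 0},
        {"environment_id": 1},
    ],
}
config = parse_config(config)  # raises if validation fails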
def _enter_transaction_management(self, managed):
"""
Switch the isolation level when needing transaction support, so that
the same transaction is visible across all the queries.
"""
if self.features.uses_autocommit and managed and not self.isolation_level:
self._set_isolation_level(1)
def _leave_transaction_management(self, managed):
"""
If the normal operating mode is "autocommit", switch back to that when
leaving transaction management.
"""
if self.features.uses_autocommit and not managed and self.isolation_level:
self._set_isolation_level(0)
def _set_isolation_level(self, level):
"""
Do all the related feature configurations for changing isolation
levels. This doesn't touch the uses_autocommit feature, since that
controls the movement *between* isolation levels.
"""
assert level in (0, 1)
try:
if self.connection is not None:
self.connection.set_isolation_level(level)
finally:
self.isolation_level = level
self.features.uses_savepoints = bool(level)
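# Taken together, the three hooks implement this flow (a sketch assuming
# a backend wrapper `conn` whose features.uses_autocommit is True):
# conn starts in autocommit (isolation_level == 0)
conn._enter_transaction_management(managed=True)   # level 1, savepoints on
# ... queries here share one transaction ...
conn._leave_transaction_management(managed=False)  # back to level 0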
def lazy(func, *resultclasses):
"""
Turns any callable into a lazily evaluated callable. You need to give result
classes or types -- at least one is needed so that the automatic forcing of
the lazy evaluation code is triggered. Results are not memoized; the
function is evaluated on every access.
"""
class __proxy__(Promise):
"""
Encapsulate a function call and act as a proxy for methods that are
called on the result of that function. The function is not evaluated
until one of the methods on the result is called.
"""
__dispatch = None
def __init__(self, args, kw):
self.__func = func
self.__args = args
self.__kw = kw
if self.__dispatch is None:
self.__prepare_class__()
def __reduce__(self):
return (
_lazy_proxy_unpickle,
(self.__func, self.__args, self.__kw) + resultclasses
)
def __prepare_class__(cls):
cls.__dispatch = {}
for resultclass in resultclasses:
cls.__dispatch[resultclass] = {}
for type_ in reversed(resultclass.mro()):
for (k, v) in type_.__dict__.items():
# All __promise__ return the same wrapper method, but they
# also do setup, inserting the method into the dispatch
# dict.
meth = cls.__promise__(resultclass, k, v)
if hasattr(cls, k):
continue
setattr(cls, k, meth)
cls._delegate_str = str in resultclasses
cls._delegate_unicode = unicode in resultclasses
assert not (cls._delegate_str and cls._delegate_unicode), "Cannot call lazy() with both str and unicode return types."
if cls._delegate_unicode:
cls.__unicode__ = cls.__unicode_cast
elif cls._delegate_str:
cls.__str__ = cls.__str_cast
__prepare_class__ = classmethod(__prepare_class__)
def __promise__(cls, klass, funcname, func):
# Builds a wrapper around some magic method and registers that magic
# method for the given type and method name.
def __wrapper__(self, *args, **kw):
# Automatically triggers the evaluation of a lazy value and
# applies the given magic method of the result type.
res = self.__func(*self.__args, **self.__kw)
for t in type(res).mro():
if t in self.__dispatch:
return self.__dispatch[t][funcname](res, *args, **kw)
raise TypeError("Lazy object returned unexpected type.")
if klass not in cls.__dispatch:
cls.__dispatch[klass] = {}
cls.__dispatch[klass][funcname] = func
return __wrapper__
__promise__ = classmethod(__promise__)
def __unicode_cast(self):
return self.__func(*self.__args, **self.__kw)
def __str_cast(self):
return str(self.__func(*self.__args, **self.__kw))
def __cmp__(self, rhs):
if self._delegate_str:
s = str(self.__func(*self.__args, **self.__kw))
elif self._delegate_unicode:
s = unicode(self.__func(*self.__args, **self.__kw))
else:
s = self.__func(*self.__args, **self.__kw)
if isinstance(rhs, Promise):
return -cmp(rhs, s)
else:
return cmp(s, rhs)
def __mod__(self, rhs):
if self._delegate_str:
return str(self) % rhs
elif self._delegate_unicode:
return unicode(self) % rhs
else:
raise AssertionError('__mod__ not supported for non-string types')
def __deepcopy__(self, memo):
# Instances of this class are effectively immutable. It's just a
# collection of functions. So we don't need to do anything
# complicated for copying.
memo[id(self)] = self
return self
@wraps(func)
def __wrapper__(*args, **kw):
# Creates the proxy object, instead of the actual value.
return __proxy__(args, kw)
return __wrapper__
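# An illustrative use (this is Python 2 era code, hence `unicode`):
def greet(name):
    return u"Hello, %s" % name

lazy_greet = lazy(greet, unicode)
g = lazy_greet("world")  # greet has not been called yet
unicode(g)               # forces evaluation -> u'Hello, world'
unicode(g)               # evaluated again: results are not memoized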
def allow_lazy(func, *resultclasses):
"""
A decorator that allows a function to be called with one or more lazy
arguments. If none of the args are lazy, the function is evaluated
immediately, otherwise a __proxy__ is returned that will evaluate the
function when needed.
"""
@wraps(func)
def wrapper(*args, **kwargs):
for arg in list(args) + kwargs.values():
if isinstance(arg, Promise):
break
else:
return func(*args, **kwargs)
return lazy(func, *resultclasses)(*args, **kwargs)
return wrapper
def _setup(self):
"""
Must be implemented by subclasses to initialise the wrapped object.
"""
raise NotImplementedError
def intcomma(value, use_l10n=True):
"""
Converts an integer to a string containing commas every three digits.
For example, 3000 becomes '3,000' and 45000 becomes '45,000'.
"""
if settings.USE_L10N and use_l10n:
try:
if not isinstance(value, float):
value = int(value)
except (TypeError, ValueError):
return intcomma(value, False)
else:
return number_format(value)
orig = force_unicode(value)
new = re.sub("^(-?\d+)(\d{3})", '\g<1>,\g<2>', orig)
if orig == new:
return new
else:
return intcomma(new, use_l10n)
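# With localization off, the regex inserts one comma per pass and the
# function recurses until the string stops changing, e.g.:
#   intcomma(4500000):
#     '4500000'   -> '4500,000'    (first pass)
#     '4500,000'  -> '4,500,000'   (second pass)
#     '4,500,000' unchanged        -> returned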
def intword(value):
"""
Converts a large integer to a friendly text representation. Works best for
numbers over 1 million. For example, 1000000 becomes '1.0 million', 1200000
becomes '1.2 million' and '1200000000' becomes '1.2 billion'.
"""
try:
value = int(value)
except (TypeError, ValueError):
return value
if value < 1000000:
return value
def _check_for_i18n(value, float_formatted, string_formatted):
"""
Use the i18n enabled defaultfilters.floatformat if possible
"""
if settings.USE_L10N:
return defaultfilters.floatformat(value, 1), string_formatted
return value, float_formatted
if value < 1000000000:
new_value = value / 1000000.0
new_value, value_string = _check_for_i18n(new_value,
ungettext('%(value).1f million', '%(value).1f million', new_value),
ungettext('%(value)s million', '%(value)s million', new_value))
return value_string % {'value': new_value}
if value < 1000000000000:
new_value = value / 1000000000.0
new_value, value_string = _check_for_i18n(new_value,
ungettext('%(value).1f billion', '%(value).1f billion', new_value),
ungettext('%(value)s billion', '%(value)s billion', new_value))
return value_string % {'value': new_value}
if value < 1000000000000000:
new_value = value / 1000000000000.0
new_value, value_string = _check_for_i18n(new_value,
ungettext('%(value).1f trillion', '%(value).1f trillion', new_value),
ungettext('%(value)s trillion', '%(value)s trillion', new_value))
return value_string % {'value': new_value}
return value
def _check_for_i18n(value, float_formatted, string_formatted):
"""
Use the i18n enabled defaultfilters.floatformat if possible
"""
if settings.USE_L10N:
return defaultfilters.floatformat(value, 1), string_formatted
return value, float_formatted
def assertFieldOutput(self, fieldclass, valid, invalid, field_args=None,
field_kwargs=None, empty_value=u''):
"""
Asserts that a field behaves correctly with various inputs.
Args:
fieldclass: the class of the field to be tested.
valid: a dictionary mapping valid inputs to their expected
cleaned values.
invalid: a dictionary mapping invalid inputs to one or more
raised error messages.
field_args: the args passed to instantiate the field
field_kwargs: the kwargs passed to instantiate the field
empty_value: the expected clean output for inputs in EMPTY_VALUES
"""
if field_args is None:
field_args = []
if field_kwargs is None:
field_kwargs = {}
required = fieldclass(*field_args, **field_kwargs)
optional = fieldclass(*field_args, **dict(field_kwargs, required=False))
# test valid inputs
for input, output in valid.items():
self.assertEqual(required.clean(input), output)
self.assertEqual(optional.clean(input), output)
# test invalid inputs
for input, errors in invalid.items():
with self.assertRaises(ValidationError) as context_manager:
required.clean(input)
self.assertEqual(context_manager.exception.messages, errors)
with self.assertRaises(ValidationError) as context_manager:
optional.clean(input)
self.assertEqual(context_manager.exception.messages, errors)
# test required inputs
error_required = [u'This field is required.']
for e in EMPTY_VALUES:
with self.assertRaises(ValidationError) as context_manager:
required.clean(e)
self.assertEqual(context_manager.exception.messages, error_required)
self.assertEqual(optional.clean(e), empty_value)
# test that max_length and min_length are always accepted
if issubclass(fieldclass, CharField):
field_kwargs.update({'min_length':2, 'max_length':20})
self.assertTrue(isinstance(fieldclass(*field_args, **field_kwargs), fieldclass))
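# A hypothetical test using the helper; the field class and error
# message text below are assumptions.
class EmailFieldTests(TestCase):
    def test_emailfield(self):
        self.assertFieldOutput(
            EmailField,
            valid={u'a@b.com': u'a@b.com'},
            invalid={u'not-an-email': [u'Enter a valid e-mail address.']},
        )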
def assertNotContains(self, response, text, status_code=200,
msg_prefix=''):
"""
Asserts that a response indicates that some content was retrieved
successfully, (i.e., the HTTP status code was as expected), and that
``text`` doesn't occur in the content of the response.
"""
if msg_prefix:
msg_prefix += ": "
self.assertEqual(response.status_code, status_code,
msg_prefix + "Couldn't retrieve content: Response code was %d"
" (expected %d)" % (response.status_code, status_code))
text = smart_str(text, response._charset)
self.assertEqual(response.content.count(text), 0,
msg_prefix + "Response should not contain '%s'" % text) | def assertNotContains(self, response, text, status_code=200,
msg_prefix=''):
"""
Asserts that a response indicates that some content was retrieved
successfully, (i.e., the HTTP status code was as expected), and that
``text`` doesn't occurs in the content of the response.
"""
if msg_prefix:
msg_prefix += ": "
self.assertEqual(response.status_code, status_code,
msg_prefix + "Couldn't retrieve content: Response code was %d"
" (expected %d)" % (response.status_code, status_code))
text = smart_str(text, response._charset)
self.assertEqual(response.content.count(text), 0,
msg_prefix + "Response should not contain '%s'" % text) |
def assertTemplateUsed(self, response, template_name, msg_prefix=''):
"""
Asserts that the template with the provided name was used in rendering
the response.
"""
if msg_prefix:
msg_prefix += ": "
template_names = [t.name for t in response.templates]
if not template_names:
self.fail(msg_prefix + "No templates used to render the response")
self.assertTrue(template_name in template_names,
msg_prefix + "Template '%s' was not a template used to render"
" the response. Actual template(s) used: %s" %
(template_name, u', '.join(template_names)))
def assertTemplateNotUsed(self, response, template_name, msg_prefix=''):
"""
Asserts that the template with the provided name was NOT used in
rendering the response.
"""
if msg_prefix:
msg_prefix += ": "
template_names = [t.name for t in response.templates]
self.assertFalse(template_name in template_names,
msg_prefix + "Template '%s' was used unexpectedly in rendering"
" the response" % template_name) | def assertTemplateNotUsed(self, response, template_name, msg_prefix=''):
"""
Asserts that the template with the provided name was NOT used in
rendering the response.
"""
if msg_prefix:
msg_prefix += ": "
template_names = [t.name for t in response.templates]
self.assertFalse(template_name in template_names,
msg_prefix + "Template '%s' was used unexpectedly in rendering"
" the response" % template_name) |
def connections_support_transactions():
"""
Returns True if all connections support transactions.
"""
return all(conn.features.supports_transactions for conn in connections.all())
def PNN(feature_dim_dict, embedding_size=8, hidden_size=(128, 128), l2_reg_embedding=1e-5, l2_reg_deep=0,
init_std=0.0001, seed=1024, keep_prob=1, activation='relu',
final_activation='sigmoid', use_inner=True, use_outter=False, kernel_type='mat', output_dim=1):
"""Instantiates the Product-based Neural Network architecture.
:param feature_dim_dict: dict, to indicate sparse field and dense field like {'sparse':{'field_1':4,'field_2':3,'field_3':2},'dense':['field_4','field_5']}
:param embedding_size: positive integer, sparse feature embedding_size
:param hidden_size: list, list of positive integers or empty list, the layer number and units in each layer of the deep net
:param l2_reg_embedding: float. L2 regularizer strength applied to embedding vector
:param l2_reg_deep: float. L2 regularizer strength applied to deep net
:param init_std: float, to use as the initialization std of the embedding vector
:param seed: integer, to use as random seed.
:param keep_prob: float in (0,1]. keep_prob used in deep net
:param activation: Activation function to use in deep net
:param final_activation: str, output activation, usually ``'sigmoid'`` or ``'linear'``
:param use_inner: bool, whether to use inner-product or not.
:param use_outter: bool, whether to use outter-product or not.
:param kernel_type: str, kernel_type used in outter-product, can be ``'mat'``, ``'vec'`` or ``'num'``
:param output_dim: positive integer, number of model outputs
:return: A Keras model instance.
"""
check_feature_config_dict(feature_dim_dict)
if kernel_type not in ['mat', 'vec', 'num']:
raise ValueError("kernel_type must be mat,vec or num")
deep_emb_list, _, inputs_list = preprocess_input_embedding(feature_dim_dict, embedding_size,
l2_reg_embedding, 0, init_std,
seed, True)
inner_product = tf.keras.layers.Flatten()(InnerProductLayer()(deep_emb_list))
outter_product = OutterProductLayer(kernel_type)(deep_emb_list)
# ipnn deep input
linear_signal = tf.keras.layers.Reshape(
[len(deep_emb_list)*embedding_size])(concat_fun(deep_emb_list))
if use_inner and use_outter:
deep_input = tf.keras.layers.Concatenate()(
[linear_signal, inner_product, outter_product])
elif use_inner:
deep_input = tf.keras.layers.Concatenate()(
[linear_signal, inner_product])
elif use_outter:
deep_input = tf.keras.layers.Concatenate()(
[linear_signal, outter_product])
else:
deep_input = linear_signal
output=[]
for _ in range(output_dim):
deep_out = MLP(hidden_size, activation, l2_reg_deep, keep_prob,
False, seed)(deep_input)
deep_logit = tf.keras.layers.Dense(
1, use_bias=False, activation=None)(deep_out)
output.append(PredictionLayer(final_activation)(deep_logit))
model = tf.keras.models.Model(inputs=inputs_list,
outputs=output)
return model
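# A usage sketch following the feature_dim_dict format from the
# docstring (training data is omitted):
feature_dim_dict = {
    "sparse": {"field_1": 4, "field_2": 3, "field_3": 2},
    "dense": ["field_4", "field_5"],
}
model = PNN(feature_dim_dict, embedding_size=8, hidden_size=(128, 128),
            use_inner=True, use_outter=False)
model.compile("adam", "binary_crossentropy", metrics=["binary_crossentropy"])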
def WDL(deep_feature_dim_dict, wide_feature_dim_dict, embedding_size=8, hidden_size=(128, 128), l2_reg_linear=1e-5, l2_reg_embedding=1e-5, l2_reg_deep=0, init_std=0.0001, seed=1024, keep_prob=1, activation='relu', final_activation='sigmoid', output_dim=1):
"""Instantiates the Wide&Deep Learning architecture.
:param deep_feature_dim_dict: dict, to indicate sparse field and dense field in deep part like {'sparse':{'field_1':4,'field_2':3,'field_3':2},'dense':['field_4','field_5']}
:param wide_feature_dim_dict: dict, to indicate sparse field and dense field in wide part like {'sparse':{'field_1':4,'field_2':3,'field_3':2},'dense':['field_4','field_5']}
:param embedding_size: positive integer, sparse feature embedding_size
:param hidden_size: list, list of positive integers or empty list, the layer number and units in each layer of the deep net
:param l2_reg_linear: float. L2 regularizer strength applied to wide part
:param l2_reg_embedding: float. L2 regularizer strength applied to embedding vector
:param l2_reg_deep: float. L2 regularizer strength applied to deep net
:param init_std: float, to use as the initialization std of the embedding vector
:param seed: integer, to use as random seed.
:param keep_prob: float in (0,1]. keep_prob used in deep net
:param activation: Activation function to use in deep net
:param final_activation: str, output activation, usually ``'sigmoid'`` or ``'linear'``
:param output_dim: positive integer, number of model outputs
:return: A Keras model instance.
"""
if not isinstance(deep_feature_dim_dict,
dict) or "sparse" not in deep_feature_dim_dict or "dense" not in deep_feature_dim_dict:
raise ValueError(
"feature_dim must be a dict like {'sparse':{'field_1':4,'field_2':3,'field_3':2},'dense':['field_5',]}")
sparse_input, dense_input, = create_singlefeat_dict(
deep_feature_dim_dict)
bias_sparse_input, bias_dense_input = create_singlefeat_dict(
wide_feature_dim_dict, 'bias')
sparse_embedding = create_embedding_dict(
deep_feature_dim_dict, embedding_size, init_std, seed, l2_reg_embedding)
wide_linear_embedding = create_embedding_dict(
wide_feature_dim_dict, 1, init_std, seed, l2_reg_linear, 'linear')
embed_list = get_embedding_vec_list(sparse_embedding, sparse_input)
deep_input = Concatenate()(embed_list) if len(
embed_list) > 1 else embed_list[0]
deep_input = Flatten()(deep_input)
if len(dense_input) > 0:
deep_input = Concatenate()([deep_input]+list(dense_input.values()))
output=[]
for _ in range(output_dim):
deep_out = MLP(hidden_size, activation, l2_reg_deep, keep_prob,
False, seed)(deep_input)
deep_logit = Dense(1, use_bias=False, activation=None)(deep_out)
final_logit = deep_logit
if len(wide_feature_dim_dict['dense']) + len(wide_feature_dim_dict['sparse']) > 0:
if len(wide_feature_dim_dict['sparse']) > 0:
bias_embed_list = get_embedding_vec_list(
wide_linear_embedding, bias_sparse_input)
linear_term = add(bias_embed_list) if len(
bias_embed_list) > 1 else bias_embed_list[0]
final_logit = add([final_logit, linear_term])
if len(wide_feature_dim_dict['dense']) > 0:
wide_dense_term = Dense(1, use_bias=False, activation=None)(Concatenate()(
list(bias_dense_input.values())) if len(bias_dense_input) > 1 else list(bias_dense_input.values())[0])
final_logit = add([final_logit, wide_dense_term])
output.append(PredictionLayer(final_activation)(final_logit))
inputs_list = get_inputs_list(
[sparse_input, dense_input, bias_sparse_input, bias_dense_input])
model = Model(inputs=inputs_list, outputs=output)
    return model
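
# A minimal usage sketch for WDL. Assumptions: create_singlefeat_dict builds
# one Keras Input of shape (1,) per field, fed in the order returned by
# get_inputs_list (deep sparse, deep dense, wide sparse, wide dense); the
# field names and cardinalities below are made up.
import numpy as np

deep_dims = {'sparse': {'field_1': 4, 'field_2': 3}, 'dense': ['field_3']}
wide_dims = {'sparse': {'field_1': 4}, 'dense': []}
wdl_model = WDL(deep_dims, wide_dims, embedding_size=4, hidden_size=(32, 32))
wdl_model.compile('adam', 'binary_crossentropy')

n = 256
x = [np.random.randint(0, 4, (n, 1)),   # deep sparse field_1
     np.random.randint(0, 3, (n, 1)),   # deep sparse field_2
     np.random.random((n, 1)),          # deep dense field_3
     np.random.randint(0, 4, (n, 1))]   # wide sparse field_1
y = np.random.randint(0, 2, (n, 1))
wdl_model.fit(x, y, epochs=1, verbose=0)
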
def DeepFM(feature_dim_dict, embedding_size=8,
           use_fm=True, hidden_size=(128, 128), l2_reg_linear=0.00001, l2_reg_embedding=0.00001, l2_reg_deep=0,
           init_std=0.0001, seed=1024, keep_prob=1, activation='relu', final_activation='sigmoid', use_bn=False,
           output_dim=1):
"""Instantiates the DeepFM Network architecture.
:param feature_dim_dict: dict,to indicate sparse field and dense field like {'sparse':{'field_1':4,'field_2':3,'field_3':2},'dense':['field_4','field_5']}
:param embedding_size: positive integer,sparse feature embedding_size
:param use_fm: bool,use FM part or not
:param hidden_size: list,list of positive integer or empty list, the layer number and units in each layer of deep net
:param l2_reg_linear: float. L2 regularizer strength applied to linear part
:param l2_reg_embedding: float. L2 regularizer strength applied to embedding vector
:param l2_reg_deep: float. L2 regularizer strength applied to deep net
:param init_std: float,to use as the initialize std of embedding vector
:param seed: integer ,to use as random seed.
:param keep_prob: float in (0,1]. keep_prob used in deep net
:param activation: Activation function to use in deep net
:param final_activation: str,output activation,usually ``'sigmoid'`` or ``'linear'``
:param use_bn: bool. Whether use BatchNormalization before activation or not.in deep net
    :param output_dim: positive integer, number of prediction heads; each head gets its own FM and deep tower
    :return: A Keras model instance.
"""
check_feature_config_dict(feature_dim_dict)
deep_emb_list, linear_logit, inputs_list = preprocess_input_embedding(feature_dim_dict, embedding_size,
l2_reg_embedding, l2_reg_linear, init_std,
seed, True)
fm_input = concat_fun(deep_emb_list, axis=1)
deep_input = tf.keras.layers.Flatten()(fm_input)
    output = []
for _ in range(output_dim):
fm_out = FM()(fm_input)
deep_out = MLP(hidden_size, activation, l2_reg_deep, keep_prob,
use_bn, seed)(deep_input)
deep_logit = tf.keras.layers.Dense(
1, use_bias=False, activation=None)(deep_out)
        if len(hidden_size) == 0 and not use_fm:  # only linear
            final_logit = linear_logit
        elif len(hidden_size) == 0 and use_fm:  # linear + FM
            final_logit = tf.keras.layers.add([linear_logit, fm_out])
        elif len(hidden_size) > 0 and not use_fm:  # linear + Deep
            final_logit = tf.keras.layers.add([linear_logit, deep_logit])
        elif len(hidden_size) > 0 and use_fm:  # linear + FM + Deep
            final_logit = tf.keras.layers.add(
                [linear_logit, fm_out, deep_logit])
        else:
            raise NotImplementedError
output.append(PredictionLayer(final_activation)(final_logit))
model = tf.keras.models.Model(inputs=inputs_list, outputs=output)
    return model
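
# The FM() layer above is assumed to compute the standard second-order
# factorization-machine interaction (Rendle, 2010): with field embeddings V
# of shape (batch, fields, k), the term is
#   0.5 * sum_k ((sum_i v_ik)^2 - sum_i v_ik^2).
# A numpy sketch of the same quantity:
import numpy as np

_V = np.random.random((2, 3, 4))
_fm_2nd = 0.5 * np.sum(np.sum(_V, axis=1) ** 2 - np.sum(_V ** 2, axis=1),
                       axis=-1, keepdims=True)  # shape (batch, 1)
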
def AutoInt(feature_dim_dict, embedding_size=8, att_layer_num=3, att_embedding_size=8, att_head_num=2, att_res=True, hidden_size=(256, 256), activation='relu',
            l2_reg_deep=0, l2_reg_embedding=1e-5, use_bn=False, keep_prob=1.0, init_std=0.0001, seed=1024,
            final_activation='sigmoid',
            output_dim=1):
"""Instantiates the AutoInt Network architecture.
:param feature_dim_dict: dict,to indicate sparse field and dense field like {'sparse':{'field_1':4,'field_2':3,'field_3':2},'dense':['field_4','field_5']}
:param embedding_size: positive integer,sparse feature embedding_size
:param att_layer_num: int.The InteractingLayer number to be used.
:param att_embedding_size: int.The embedding size in multi-head self-attention network.
:param att_head_num: int.The head number in multi-head self-attention network.
:param att_res: bool.Whether or not use standard residual connections before output.
:param hidden_size: list,list of positive integer or empty list, the layer number and units in each layer of deep net
:param activation: Activation function to use in deep net
:param l2_reg_deep: float. L2 regularizer strength applied to deep net
:param l2_reg_embedding: float. L2 regularizer strength applied to embedding vector
:param use_bn: bool. Whether use BatchNormalization before activation or not.in deep net
:param keep_prob: float in (0,1]. keep_prob used in deep net
:param init_std: float,to use as the initialize std of embedding vector
:param seed: integer ,to use as random seed.
:param final_activation: output activation,usually ``'sigmoid'`` or ``'linear'``
    :param output_dim: positive integer, number of prediction heads; each head gets its own attention/deep tower
    :return: A Keras model instance.
"""
    if len(hidden_size) <= 0 and att_layer_num <= 0:
        raise ValueError("Either hidden_size or att_layer_num must be > 0")
check_feature_config_dict(feature_dim_dict)
deep_emb_list, _, inputs_list = preprocess_input_embedding(feature_dim_dict, embedding_size,
l2_reg_embedding, 0, init_std,
seed, False)
att_input = concat_fun(deep_emb_list, axis=1)
    output = []
    for _ in range(output_dim):
        # restart from the shared embedding input for each head so the
        # attention layers are not stacked cumulatively when output_dim > 1
        att_out = att_input
        for _ in range(att_layer_num):
            att_out = InteractingLayer(
                att_embedding_size, att_head_num, att_res)(att_out)
        att_output = tf.keras.layers.Flatten()(att_out)
deep_input = tf.keras.layers.Flatten()(concat_fun(deep_emb_list))
if len(hidden_size) > 0 and att_layer_num > 0: # Deep & Interacting Layer
deep_out = MLP(hidden_size, activation, l2_reg_deep, keep_prob,
use_bn, seed)(deep_input)
stack_out = tf.keras.layers.Concatenate()([att_output, deep_out])
final_logit = tf.keras.layers.Dense(
1, use_bias=False, activation=None)(stack_out)
elif len(hidden_size) > 0: # Only Deep
deep_out = MLP(hidden_size, activation, l2_reg_deep, keep_prob,
use_bn, seed)(deep_input)
final_logit = tf.keras.layers.Dense(
1, use_bias=False, activation=None)(deep_out)
elif att_layer_num > 0: # Only Interacting Layer
final_logit = tf.keras.layers.Dense(
1, use_bias=False, activation=None)(att_output)
else: # Error
raise NotImplementedError
output.append(PredictionLayer(final_activation)(final_logit))
model = tf.keras.models.Model(inputs=inputs_list, outputs=output)
    return model
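
# Shape bookkeeping for the attention stack above, assuming InteractingLayer
# follows the AutoInt paper (Song et al., 2018): each layer maps
# (batch, fields, d) -> (batch, fields, att_embedding_size * att_head_num)
# via multi-head self-attention over the field axis, so the flattened
# att_output has fields * att_embedding_size * att_head_num units.
# A single-head, plain-numpy sketch of that attention for one example:
import numpy as np

_d, _d_att = 8, 4
_x = np.random.random((3, _d))                       # (fields, d)
_Wq, _Wk, _Wv = (np.random.random((_d, _d_att)) for _ in range(3))
_q, _k, _v = _x @ _Wq, _x @ _Wk, _x @ _Wv
_scores = _q @ _k.T                                  # (fields, fields)
_attn = np.exp(_scores) / np.exp(_scores).sum(axis=1, keepdims=True)
_att_out = _attn @ _v                                # (fields, d_att)
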
def FNN(feature_dim_dict, embedding_size=8,
        hidden_size=(128, 128),
        l2_reg_embedding=1e-5, l2_reg_linear=1e-5, l2_reg_deep=0,
        init_std=0.0001, seed=1024, keep_prob=1,
        activation='relu', final_activation='sigmoid',
        output_dim=1):
"""Instantiates the Factorization-supported Neural Network architecture.
:param feature_dim_dict: dict,to indicate sparse field and dense field like {'sparse':{'field_1':4,'field_2':3,'field_3':2},'dense':['field_4','field_5']}
:param embedding_size: positive integer,sparse feature embedding_size
:param hidden_size: list,list of positive integer or empty list, the layer number and units in each layer of deep net
:param l2_reg_embedding: float. L2 regularizer strength applied to embedding vector
:param l2_reg_linear: float. L2 regularizer strength applied to linear weight
:param l2_reg_deep: float . L2 regularizer strength applied to deep net
:param init_std: float,to use as the initialize std of embedding vector
:param seed: integer ,to use as random seed.
:param keep_prob: float in (0,1]. keep_prob used in deep net
:param activation: Activation function to use in deep net
:param final_activation: str,output activation,usually ``'sigmoid'`` or ``'linear'``
    :param output_dim: positive integer, number of prediction heads; each head gets its own deep tower
    :return: A Keras model instance.
"""
check_feature_config_dict(feature_dim_dict)
deep_emb_list, linear_logit, inputs_list = preprocess_input_embedding(feature_dim_dict, embedding_size,
l2_reg_embedding, l2_reg_linear, init_std,
seed, True)
deep_input = tf.keras.layers.Flatten()(concat_fun(deep_emb_list))
    output = []
for _ in range(output_dim):
deep_out = MLP(hidden_size, activation, l2_reg_deep,
keep_prob, False, seed)(deep_input)
deep_logit = tf.keras.layers.Dense(
1, use_bias=False, activation=None)(deep_out)
final_logit = tf.keras.layers.add([deep_logit, linear_logit])
output.append(PredictionLayer(final_activation)(final_logit))
model = tf.keras.models.Model(inputs=inputs_list,
outputs=output)
    return model
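
# When output_dim > 1, these builders return a model with a list of heads,
# one PredictionLayer output per head, so compile/fit take per-head losses
# and targets. A minimal sketch (same input-layout assumptions as the WDL
# sketch above; names are made up):
import numpy as np

dims = {'sparse': {'field_1': 4}, 'dense': []}
fnn2 = FNN(dims, embedding_size=4, hidden_size=(16,), output_dim=2)
fnn2.compile('adam', ['binary_crossentropy', 'binary_crossentropy'])
n = 128
fnn2.fit([np.random.randint(0, 4, (n, 1))],
         [np.random.randint(0, 2, (n, 1)), np.random.randint(0, 2, (n, 1))],
         epochs=1, verbose=0)
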
def NFM(feature_dim_dict, embedding_size=8,
        hidden_size=(128, 128), l2_reg_embedding=1e-5, l2_reg_linear=1e-5, l2_reg_deep=0,
        init_std=0.0001, seed=1024, keep_prob=1, activation='relu', final_activation='sigmoid',
        output_dim=1):
"""Instantiates the Neural Factorization Machine architecture.
:param feature_dim_dict: dict,to indicate sparse field and dense field like {'sparse':{'field_1':4,'field_2':3,'field_3':2},'dense':['field_4','field_5']}
:param embedding_size: positive integer,sparse feature embedding_size
:param hidden_size: list,list of positive integer or empty list, the layer number and units in each layer of deep net
:param l2_reg_embedding: float. L2 regularizer strength applied to embedding vector
:param l2_reg_linear: float. L2 regularizer strength applied to linear part.
:param l2_reg_deep: float . L2 regularizer strength applied to deep net
:param init_std: float,to use as the initialize std of embedding vector
:param seed: integer ,to use as random seed.
:param keep_prob: float in (0,1]. keep_prob used in deep net
:param activation: Activation function to use in deep net
:param final_activation: str,output activation,usually ``'sigmoid'`` or ``'linear'``
    :param output_dim: positive integer, number of prediction heads; each head gets its own deep tower
    :return: A Keras model instance.
"""
check_feature_config_dict(feature_dim_dict)
deep_emb_list, linear_logit, inputs_list = preprocess_input_embedding(feature_dim_dict, embedding_size,
l2_reg_embedding, l2_reg_linear, init_std,
seed, True)
fm_input = concat_fun(deep_emb_list, axis=1)
    bi_out = BiInteractionPooling()(fm_input)
    bi_out = tf.keras.layers.Dropout(1 - keep_prob)(bi_out)
    output = []
    for _ in range(output_dim):
        # reset final_logit for each head so deep logits are not accumulated
        # across heads, and give each head its own deep tower
        final_logit = linear_logit
        if len(hidden_size) > 0:
            deep_out = MLP(hidden_size, activation, l2_reg_deep, keep_prob,
                           False, seed)(bi_out)
            deep_logit = tf.keras.layers.Dense(
                1, use_bias=False, activation=None)(deep_out)
            final_logit = tf.keras.layers.add([final_logit, deep_logit])
        output.append(PredictionLayer(final_activation)(final_logit))
model = tf.keras.models.Model(inputs=inputs_list, outputs=output)
    return model
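
# BiInteractionPooling is assumed to implement the NFM bi-interaction
# (He & Chua, 2017): 0.5 * ((sum_i v_i)^2 - sum_i v_i^2) over the field
# axis, which equals the sum of all pairwise element-wise products of the
# field embeddings. A numpy check of that identity:
import numpy as np

_v = np.random.random((2, 3, 4))                     # (batch, fields, k)
_bi = 0.5 * (np.sum(_v, axis=1) ** 2 - np.sum(_v ** 2, axis=1))
_pairwise = sum(_v[:, i] * _v[:, j]
                for i in range(3) for j in range(i + 1, 3))
assert np.allclose(_bi, _pairwise)
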
def DCN(feature_dim_dict, embedding_size='auto',
        cross_num=2, hidden_size=(128, 128), l2_reg_embedding=1e-5, l2_reg_cross=1e-5, l2_reg_deep=0,
        init_std=0.0001, seed=1024, keep_prob=1, use_bn=False, activation='relu', final_activation='sigmoid',
        output_dim=1):
"""Instantiates the Deep&Cross Network architecture.
:param feature_dim_dict: dict,to indicate sparse field and dense field like {'sparse':{'field_1':4,'field_2':3,'field_3':2},'dense':['field_4','field_5']}
    :param embedding_size: positive int or str,sparse feature embedding_size. If set to "auto", it will be 6*pow(cardinality, 0.25)
    :param cross_num: positive integer,cross layer number
:param hidden_size: list,list of positive integer or empty list, the layer number and units in each layer of deep net
:param l2_reg_embedding: float. L2 regularizer strength applied to embedding vector
:param l2_reg_cross: float. L2 regularizer strength applied to cross net
:param l2_reg_deep: float. L2 regularizer strength applied to deep net
:param init_std: float,to use as the initialize std of embedding vector
:param seed: integer ,to use as random seed.
:param keep_prob: float in (0,1]. keep_prob used in deep net
:param use_bn: bool. Whether use BatchNormalization before activation or not.in deep net
:param activation: Activation function to use in deep net
:param final_activation: str,output activation,usually ``'sigmoid'`` or ``'linear'``
    :param output_dim: positive integer, number of prediction heads; each head gets its own cross/deep tower
    :return: A Keras model instance.
"""
    if len(hidden_size) == 0 and cross_num == 0:
        raise ValueError("Either hidden_size or cross_num must be > 0")
check_feature_config_dict(feature_dim_dict)
deep_emb_list, _, inputs_list = preprocess_input_embedding(feature_dim_dict, embedding_size,
l2_reg_embedding, 0, init_std,
seed, False)
deep_input = tf.keras.layers.Flatten()(concat_fun(deep_emb_list))
    output = []
for _ in range(output_dim):
if len(hidden_size) > 0 and cross_num > 0: # Deep & Cross
deep_out = MLP(hidden_size, activation, l2_reg_deep, keep_prob,
use_bn, seed)(deep_input)
cross_out = CrossNet(cross_num, l2_reg=l2_reg_cross)(deep_input)
stack_out = tf.keras.layers.Concatenate()([cross_out, deep_out])
final_logit = tf.keras.layers.Dense(1, use_bias=False, activation=None)(stack_out)
elif len(hidden_size) > 0: # Only Deep
deep_out = MLP(hidden_size, activation, l2_reg_deep, keep_prob,
use_bn, seed)(deep_input)
final_logit = tf.keras.layers.Dense(1, use_bias=False, activation=None)(deep_out)
elif cross_num > 0: # Only Cross
cross_out = CrossNet(cross_num, l2_reg=l2_reg_cross)(deep_input)
final_logit = tf.keras.layers.Dense(1, use_bias=False, activation=None)(cross_out)
else: # Error
raise NotImplementedError
output.append(PredictionLayer(final_activation)(final_logit))
model = tf.keras.models.Model(inputs=inputs_list, outputs=output)
    return model
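
# CrossNet is assumed to implement the Deep & Cross recurrence
# (Wang et al., 2017): x_{l+1} = x_0 * (x_l . w_l) + b_l + x_l, where x_0 is
# the flattened embedding vector. A plain-numpy sketch for a single example:
import numpy as np

_dim, _layers = 8, 2
_x0 = np.random.random(_dim)
_xl = _x0
for _ in range(_layers):
    _w, _b = np.random.random(_dim), np.zeros(_dim)
    _xl = _x0 * np.dot(_xl, _w) + _b + _xl   # the scalar x_l.w rescales x_0
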
def check_version(version):
    """Check PyPI (via the pypi.python.org JSON API) on a background thread
    and log a warning if a newer, non-prerelease version is available."""
def check(version):
try:
url_pattern = 'https://pypi.python.org/pypi/mdeepctr/json'
req = requests.get(url_pattern)
latest_version = parse('0')
version = parse(version)
if req.status_code == requests.codes.ok:
j = json.loads(req.text.encode('utf-8'))
releases = j.get('releases', [])
for release in releases:
ver = parse(release)
if not ver.is_prerelease:
latest_version = max(latest_version, ver)
if latest_version > version:
                    logging.warning(
                        '\nDeepCTR version {0} detected. Your version is {1}.\n'
                        'Use `pip install -U mdeepctr` to upgrade. '
                        'Changelog: https://github.com/shenweichen/DeepCTR/releases/tag/v{0}'.format(
                            latest_version, version))
except Exception:
return
    Thread(target=check, args=(version,)).start()
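
# Usage sketch: typically called once at import time with the package's own
# __version__. The lookup runs on a background thread and every failure is
# swallowed, so it never raises in (or blocks) the caller:
# check_version('0.2.0')
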
def MLR(region_feature_dim_dict, base_feature_dim_dict={"sparse": [], "dense": []}, region_num=4,
        l2_reg_linear=1e-5,
        init_std=0.0001, seed=1024, final_activation='sigmoid',
        bias_feature_dim_dict={"sparse": [], "dense": []},
        output_dim=1):
"""Instantiates the Mixed Logistic Regression/Piece-wise Linear Model.
:param region_feature_dim_dict: dict,to indicate sparse field and dense field like {'sparse':{'field_1':4,'field_2':3,'field_3':2},'dense':['field_4','field_5']}
:param base_feature_dim_dict: dict or None,to indicate sparse field and dense field of base learner.if None, it is same as region_feature_dim_dict
:param region_num: integer > 1,indicate the piece number
:param l2_reg_linear: float. L2 regularizer strength applied to weight
:param init_std: float,to use as the initialize std of embedding vector
:param seed: integer ,to use as random seed.
:param final_activation: str,output activation,usually ``'sigmoid'`` or ``'linear'``
:param bias_feature_dim_dict: dict,to indicate sparse field and dense field like {'sparse':{'field_1':4,'field_2':3,'field_3':2},'dense':['field_4','field_5']}
    :param output_dim: currently unused in MLR; accepted for a uniform interface across models
    :return: A Keras model instance.
"""
if region_num <= 1:
raise ValueError("region_num must > 1")
if not isinstance(region_feature_dim_dict,
dict) or "sparse" not in region_feature_dim_dict or "dense" not in region_feature_dim_dict:
raise ValueError(
"feature_dim must be a dict like {'sparse':{'field_1':4,'field_2':3,'field_3':2},'dense':['field_5',]}")
same_flag = False
if base_feature_dim_dict == {"sparse": [], "dense": []}:
base_feature_dim_dict = region_feature_dim_dict
same_flag = True
region_sparse_input, region_dense_input, base_sparse_input, base_dense_input, bias_sparse_input, bias_dense_input = get_input(
region_feature_dim_dict, base_feature_dim_dict, bias_feature_dim_dict, same_flag)
region_embeddings, base_embeddings, bias_embedding = get_embedding(
region_num, region_feature_dim_dict, base_feature_dim_dict, bias_feature_dim_dict, init_std, seed, l2_reg_linear)
if same_flag:
base_dense_input_ = region_dense_input
base_sparse_input_ = region_sparse_input
else:
base_dense_input_ = base_dense_input
base_sparse_input_ = base_sparse_input
region_dense_feature_num = len(region_feature_dim_dict['dense'])
region_sparse_feature_num = len(region_feature_dim_dict['sparse'])
base_dense_feature_num = len(base_feature_dim_dict['dense'])
base_sparse_feature_num = len(base_feature_dim_dict['sparse'])
bias_dense_feature_num = len(bias_feature_dim_dict['dense'])
bias_sparse_feature_num = len(bias_feature_dim_dict['sparse'])
if region_dense_feature_num > 1:
region_dense_logits_ = [Dense(1, )(Concatenate()(region_dense_input)) for _ in
range(region_num)]
elif region_dense_feature_num == 1:
region_dense_logits_ = [Dense(1, )(region_dense_input[0]) for _ in
range(region_num)]
if base_dense_feature_num > 1:
base_dense_logits = [Dense(1, )(Concatenate()(base_dense_input_))for _ in
range(region_num)]
elif base_dense_feature_num == 1:
base_dense_logits = [Dense(1, )(base_dense_input_[0])for _ in
range(region_num)]
if region_dense_feature_num > 0 and region_sparse_feature_num == 0:
region_logits = Concatenate()(region_dense_logits_)
elif region_dense_feature_num == 0 and region_sparse_feature_num > 0:
region_sparse_logits = [
add([region_embeddings[j][i](region_sparse_input[i])
for i in range(region_sparse_feature_num)])
if region_sparse_feature_num > 1 else region_embeddings[j][0](region_sparse_input[0])
for j in range(region_num)]
region_logits = Concatenate()(region_sparse_logits)
else:
region_sparse_logits = [
add([region_embeddings[j][i](region_sparse_input[i])
for i in range(region_sparse_feature_num)])
for j in range(region_num)]
region_logits = Concatenate()(
[add([region_sparse_logits[i], region_dense_logits_[i]]) for i in range(region_num)])
if base_dense_feature_num > 0 and base_sparse_feature_num == 0:
base_logits = base_dense_logits
elif base_dense_feature_num == 0 and base_sparse_feature_num > 0:
base_sparse_logits = [add(
[base_embeddings[j][i](base_sparse_input_[i]) for i in range(base_sparse_feature_num)]) if base_sparse_feature_num > 1 else base_embeddings[j][0](base_sparse_input_[0])
for j in range(region_num)]
base_logits = base_sparse_logits
else:
base_sparse_logits = [add(
[base_embeddings[j][i](base_sparse_input_[i]) for i in range(base_sparse_feature_num)]) if base_sparse_feature_num > 1 else base_embeddings[j][0](base_sparse_input_[0])
for j in range(region_num)]
base_logits = [add([base_sparse_logits[i], base_dense_logits[i]])
for i in range(region_num)]
# Dense(self.region_num, activation='softmax')(final_logit)
region_weights = Activation("softmax")(region_logits)
learner_score = Concatenate()(
[Activation(final_activation, name='learner' + str(i))(base_logits[i]) for i in range(region_num)])
final_logit = dot([region_weights, learner_score], axes=-1)
if bias_dense_feature_num + bias_sparse_feature_num > 0:
if bias_dense_feature_num > 1:
bias_dense_logits = Dense(1,)(Concatenate()(bias_dense_input))
elif bias_dense_feature_num == 1:
bias_dense_logits = Dense(1,)(bias_dense_input[0])
else:
pass
if bias_sparse_feature_num > 1:
bias_cate_logits = add([bias_embedding[i](bias_sparse_input[i])
for i, feat in enumerate(bias_feature_dim_dict['sparse'])])
elif bias_sparse_feature_num == 1:
bias_cate_logits = bias_embedding[0](bias_sparse_input[0])
else:
pass
if bias_dense_feature_num > 0 and bias_sparse_feature_num > 0:
bias_logits = add([bias_dense_logits, bias_cate_logits])
elif bias_dense_feature_num > 0:
bias_logits = bias_dense_logits
else:
bias_logits = bias_cate_logits
bias_prob = Activation('sigmoid')(bias_logits)
final_logit = dot([final_logit, bias_prob], axes=-1)
output = Reshape([1])(final_logit)
model = Model(inputs=region_sparse_input + region_dense_input+base_sparse_input +
base_dense_input+bias_sparse_input+bias_dense_input, outputs=output)
    return model
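
# The MLR score assembled above is a softmax-gated mixture of per-region
# linear learners (Gai et al., 2017):
#   p(y=1 | x) = sum_j softmax(U x)_j * sigmoid(w_j . x)
# A plain-numpy sketch for a single example (all weights made up):
import numpy as np

_m, _dim = 4, 8                      # region_num, feature dimension
_x = np.random.random(_dim)
_U = np.random.random((_m, _dim))    # region (gating) weights
_W = np.random.random((_m, _dim))    # base learner weights
_gate = np.exp(_U @ _x) / np.exp(_U @ _x).sum()
_score = float(_gate @ (1.0 / (1.0 + np.exp(-_W @ _x))))
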
def mean_fscore(predict_mask_seq, truth_mask_seq, iou_thresholds=THRESHOLDS, beta=2.0):
    """Calculate the average F-score for the predictions in an image over
    the iou_thresholds set.
    predict_mask_seq: list of masks of the predicted objects in the image
    truth_mask_seq: list of masks of ground-truth objects in the image
    """
    return np.mean([
        fscore(tp, fn, fp, beta)
        for (tp, fn, fp) in (confusion_counts(predict_mask_seq, truth_mask_seq, iou_thresh)
                             for iou_thresh in iou_thresholds)
    ])
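
# fscore(tp, fn, fp, beta) is assumed to be the F-beta measure computed from
# raw counts, so beta=2.0 weights recall twice as heavily as precision:
#   F_beta = (1 + beta^2) * tp / ((1 + beta^2) * tp + beta^2 * fn + fp)
# A hypothetical stand-in with that behavior:
def _fscore_sketch(tp, fn, fp, beta=2.0):
    denom = (1 + beta ** 2) * tp + (beta ** 2) * fn + fp
    return (1 + beta ** 2) * tp / denom if denom > 0 else 0.0
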
def clean_sub_graphs(G_, min_length=150, max_nodes_to_skip=100,
                     weight='length_pix', verbose=False,
                     super_verbose=False):
    '''Remove subgraphs with a max path length less than min_length.
    If the subgraph has more than max_nodes_to_skip nodes, don't check length
    (this step greatly improves processing time)'''
if len(G_.nodes()) == 0:
return G_
print("Running clean_sub_graphs...")
    # nx.connected_component_subgraphs() was removed in NetworkX 2.4; build
    # the component subgraphs explicitly so this works on current releases
    sub_graphs = [G_.subgraph(c).copy() for c in nx.connected_components(G_)]
bad_nodes = []
if verbose:
print(" len(G_.nodes()):", len(G_.nodes()))
print(" len(G_.edges()):", len(G_.edges()))
if super_verbose:
print("G_.nodes:", G_.nodes())
        edge_tmp = list(G_.edges())[np.random.randint(len(G_.edges()))]
        print(edge_tmp, "G.edge props:", G_.edges[edge_tmp[0], edge_tmp[1]])
for G_sub in sub_graphs:
# don't check length if too many nodes in subgraph
if len(G_sub.nodes()) > max_nodes_to_skip:
continue
else:
all_lengths = dict(nx.all_pairs_dijkstra_path_length(G_sub, weight=weight))
if super_verbose:
print(" \nGs.nodes:", G_sub.nodes())
print(" all_lengths:", all_lengths)
            # get all lengths
            lens = []
            for u in all_lengths.keys():
                v = all_lengths[u]
                for uprime in v.keys():
                    vprime = v[uprime]
                    lens.append(vprime)
                    if super_verbose:
                        print("  u, v", u, v)
                        print("    uprime, vprime:", uprime, vprime)
max_len = np.max(lens)
if super_verbose:
print(" Max length of path:", max_len)
if max_len < min_length:
bad_nodes.extend(G_sub.nodes())
if super_verbose:
print(" appending to bad_nodes:", G_sub.nodes())
# remove bad_nodes
G_.remove_nodes_from(bad_nodes)
if verbose:
print(" num bad_nodes:", len(bad_nodes))
# print ("bad_nodes:", bad_nodes)
print(" len(G'.nodes()):", len(G_.nodes()))
print(" len(G'.edges()):", len(G_.edges()))
if super_verbose:
print(" G_.nodes:", G_.nodes())
    return G_
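
# A small self-contained check of clean_sub_graphs, assuming edges carry the
# 'length_pix' attribute used as the default weight:
import networkx as nx

_G = nx.Graph()
_G.add_edge(0, 1, length_pix=500)    # long component: kept
_G.add_edge(2, 3, length_pix=10)     # short stub: pruned
_G = clean_sub_graphs(_G, min_length=150)
assert set(_G.nodes()) == {0, 1}
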
def dl_post_process_pred(mask, glob_thresh=80, kernel_size=9,
                         min_area=2000, contour_smoothing=0.001,
                         adapt_kernel=85, adapt_const=-3,
                         outplot_file='', dpi=500, use_glob_thresh=False,
                         kernel_open=19, verbose=False):
    '''Refine the prediction mask and return both the refined mask and its skeleton'''
t0 = time.time()
kernel_blur = kernel_size # 9
kernel_close = kernel_size # 9
# kernel_open = kernel_size #9
kernel_close = np.ones((kernel_close, kernel_close), np.uint8)
kernel_open = np.ones((kernel_open, kernel_open), np.uint8)
blur = cv2.medianBlur(mask, kernel_blur)
# global thresh
glob_thresh_arr = cv2.threshold(blur, glob_thresh, 1, cv2.THRESH_BINARY)[1]
glob_thresh_arr_smooth = cv2.medianBlur(glob_thresh_arr, kernel_blur)
t1 = time.time()
    print("Time to compute blur and global threshold:", t1 - t0, "seconds")
if use_glob_thresh:
mask_thresh = glob_thresh_arr_smooth
else:
adapt_thresh = cv2.adaptiveThreshold(mask, 1, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, \
cv2.THRESH_BINARY, adapt_kernel, adapt_const)
# resmooth
adapt_thresh_smooth = cv2.medianBlur(adapt_thresh, kernel_blur)
mask_thresh = adapt_thresh_smooth
# opening and closing
# http://docs.opencv.org/3.0-beta/doc/py_tutorials/py_imgproc/py_morphological_ops/py_morphological_ops.html
# gradient = cv2.morphologyEx(mask_thresh, cv2.MORPH_GRADIENT, kernel)
closing = cv2.morphologyEx(mask_thresh, cv2.MORPH_CLOSE, kernel_close)
opening = cv2.morphologyEx(closing, cv2.MORPH_OPEN, kernel_open)
# try on bgRemoved?
t2 = time.time()
    print("Time to compute adaptive threshold and open()/close():", t2 - t1, "seconds")
# set output
if contour_smoothing < 0:
final_mask = opening
else:
# contours
# remove small items
contours, cont_plot, hole_idxs = get_contours_complex(opening,
min_thresh=glob_thresh,
min_area=min_area,
contour_smoothing=contour_smoothing)
# for some reason contours don't extend to the edge, so clip the edge
# and resize
mask_filt_raw = get_mask(mask_thresh, cont_plot, hole_idxs=hole_idxs)
shape_tmp = mask_filt_raw.shape
        # cv2.resize takes (width, height), so swap numpy's (rows, cols) order
        mask_filt1 = 200 * cv2.resize(mask_filt_raw[2:-2, 2:-2],
                                      (shape_tmp[1], shape_tmp[0])).astype(np.uint8)
if verbose:
print("mask:", mask)
print("mask.dtype:", mask.dtype)
            print("mask_filt1.dtype:", mask_filt1.dtype)
print("mask.shape == mask_filt1.shape:", mask.shape == mask_filt1.shape)
print("mask_filt1.shape:", mask_filt1.shape)
print("mask_filt1", mask_filt1)
# thresh and resmooth
mask_filt = cv2.GaussianBlur(mask_filt1, (kernel_blur, kernel_blur), 0)
# mask_filt = cv2.threshold(mask_filt2, glob_thresh, 1, cv2.THRESH_BINARY)
final_mask = mask_filt
t3 = time.time()
print("Time to smooth contours:", t3 - t2, "seconds")
# skeletonize
# medial = medial_axis(final_mask)
# medial_int = medial.astype(np.uint8)
medial_int = medial_axis(final_mask).astype(np.uint8)
print("Time to compute medial_axis:", time.time() - t3, "seconds")
print("Time to run dl_post_process_pred():", time.time() - t0, "seconds")
    return final_mask, medial_int
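
# Usage sketch (the mask path is hypothetical; the input is expected to be a
# single-channel uint8 probability/confidence mask):
# mask = cv2.imread('pred_mask.png', cv2.IMREAD_GRAYSCALE)
# final_mask, skeleton = dl_post_process_pred(mask, glob_thresh=80,
#                                             use_glob_thresh=True)
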
def make_skeleton(img_loc, thresh, debug, fix_borders, replicate=5,
                  clip=2, img_shape=(1300, 1300), img_mult=255, hole_size=300,
                  cv2_kernel_close=7, cv2_kernel_open=7,
                  use_medial_axis=False, max_out_size=(200000, 200000),
                  num_classes=1, skeleton_band='all'):
    '''
    Extract a skeleton from a mask.
    skeleton_band is the index of the band of the mask to use for
    skeleton extraction; set it to the string 'all' to use all bands
    '''
print("Executing make_skeleton...")
t0 = time.time()
# replicate = 5
# clip = 2
rec = replicate + clip
# read in data
if num_classes == 1:
try:
img = cv2.imread(img_loc, cv2.IMREAD_GRAYSCALE)
        except Exception:
img = skimage.io.imread(img_loc, as_gray=True).astype(np.uint8) # [::-1]
else:
# ensure 8bit?
img_tmp = skimage.io.imread(img_loc).astype(np.uint8)
        # the band selection below indexes axis 0, so the array must be
        # (channels, h, w); assume fewer than 20 channels
        if img_tmp.shape[0] > 20:
            # axis 0 is larger than any plausible channel count, so the image
            # is channels-last; move the channel axis to the front
            img_full = np.moveaxis(img_tmp, -1, 0)
        else:
            img_full = img_tmp
        # select the desired band for skeleton extraction;
        # if skeleton_band is the string 'all', sum across all bands
        if isinstance(skeleton_band, str):
            # clip to the uint8 range to avoid wraparound when band values
            # sum past 255
            img = np.clip(np.sum(img_full, axis=0), 0, 255).astype(np.uint8)
        else:
            img = img_full[skeleton_band, :, :]
print("make_skeleton(), input img_shape:", img_shape)
print("make_skeleton(), img.shape:", img.shape)
print("make_skeleton(), img.size:", img.size)
print("make_skeleton(), img dtype:", img.dtype)
# print ("make_skeleton(), img unique:", np.unique(img))
##########
# potentially keep only subset of data
shape0 = img.shape
img = img[:max_out_size[0], :max_out_size[1]]
if img.shape != shape0:
        print("Using only a subset of the data!")
        print("make_skeleton() new img.shape:", img.shape)
##########
if fix_borders:
img = cv2.copyMakeBorder(img, replicate, replicate, replicate,
replicate, cv2.BORDER_REPLICATE)
img_copy = None
if debug:
if fix_borders:
img_copy = np.copy(img[replicate:-replicate, replicate:-replicate])
else:
img_copy = np.copy(img)
print("run preprocess()...")
t1 = time.time()
img = preprocess(img, thresh, img_mult=img_mult, hole_size=hole_size,
cv2_kernel_close=cv2_kernel_close,
cv2_kernel_open=cv2_kernel_open)
t2 = time.time()
print("Time to run preprocess():", t2 - t1, "seconds")
if not np.any(img):
return None, None
if not use_medial_axis:
print("skeletonize...")
ske = skeletonize(img).astype(np.uint16)
t3 = time.time()
print("Time to run skimage.skeletonize():", t3 - t2, "seconds")
else:
        print("running skimage.medial_axis...")
ske = skimage.morphology.medial_axis(img).astype(np.uint16)
t3 = time.time()
print("Time to run skimage.medial_axis():", t3 - t2, "seconds")
if fix_borders:
print("fix_borders...")
ske = ske[rec:-rec, rec:-rec]
ske = cv2.copyMakeBorder(ske, clip, clip, clip, clip, cv2.BORDER_CONSTANT, value=0)
t4 = time.time()
print("Time fix borders:", t4 - t3, "seconds")
t1 = time.time()
print("ske.shape:", ske.shape)
print("Time to run make_skeleton:", t1 - t0, "seconds")
return img, ske
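# Usage sketch (added for illustration; not part of the original source).
# The mask path is hypothetical; thresh=0.3 matches the default used by
# build_wkt_dir below. Returns the refined binary mask and its skeleton.
mask_img, ske = make_skeleton('masks/AOI_2_Vegas_img1.tif', thresh=0.3,
                              debug=False, fix_borders=True)
if ske is not None:
    print('skeleton pixel count:', int(ske.sum()))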
Python | def build_wkt_dir(indir, outfile, out_ske_dir, out_gdir='', thresh=0.3,
# threshes={'2': .3, '3': .3, '4': .3, '5': .2},
im_prefix='',
debug=False, add_small=True, fix_borders=True,
img_shape=(1300, 1300),
skel_replicate=5, skel_clip=2,
img_mult=255,
hole_size=300, cv2_kernel_close=7, cv2_kernel_open=7,
min_subgraph_length_pix=50,
spacenet_naming_convention=False,
num_classes=1,
max_out_size=(100000, 100000),
skeleton_band='all'):
    '''Execute build_graph_wkt for an entire folder.
    Split the image name on AOI and keep only the name after AOI; this is
    necessary for scoring.'''
all_data = []
im_files = np.sort([z for z in os.listdir(indir) if z.endswith('.tif')])
nfiles = len(im_files)
imfiles_args = [[imf, nfiles, indir, outfile, out_ske_dir, out_gdir, thresh, im_prefix,
debug, add_small, fix_borders, img_shape, skel_replicate, skel_clip, img_mult,
hole_size, cv2_kernel_close, cv2_kernel_open,
min_subgraph_length_pix,
spacenet_naming_convention,
num_classes,
max_out_size,
skeleton_band] for imf in im_files]
with multiprocessing.Pool(16) as pool:
all_data = list(pool.starmap(build_wkt_imfile, imfiles_args))
all_data = [data for cur_data in all_data for data in cur_data]
# for i,imfile in enumerate(im_files):
# t1 = time.time()
# print ("\n\n", i+1, "/", nfiles, ":", imfile)
# logger1.info("{x} / {y} : {z}".format(x=i+1, y=nfiles, z=imfile))
# img_loc = os.path.join(indir, imfile)
#
# if spacenet_naming_convention:
# im_root = 'AOI' + imfile.split('AOI')[-1].split('.')[0]
# else:
# im_root = imfile.split('.')[0]
# if len(im_prefix) > 0:
# im_root = im_root.split(im_prefix)[-1]
#
# print (" img_loc:", img_loc)
# print (" im_root:", im_root)
# if out_ske_dir:
# out_ske_file = os.path.join(out_ske_dir, imfile)
# else:
# out_ske_file = ''
# print (" out_ske_file:", out_ske_file)
# if len(out_gdir) > 0:
# out_gpickle = os.path.join(out_gdir, imfile.split('.')[0] + '.gpickle')
# else:
# out_gpickle = ''
#
# # create wkt list
# wkt_list = build_graph_wkt(img_loc, out_ske_file,
# out_gpickle=out_gpickle, thresh=thresh, #threshes={'2': .3, '3': .3, '4': .3, '5': .2},
# debug=debug, add_small=add_small, fix_borders=fix_borders,
# img_shape=img_shape,
# skel_replicate=skel_replicate, skel_clip=skel_clip,
# img_mult=img_mult, hole_size=hole_size,
# cv2_kernel_close=cv2_kernel_close, cv2_kernel_open=cv2_kernel_open,
# min_subgraph_length_pix=min_subgraph_length_pix,
# max_out_size=max_out_size,
# num_classes=num_classes,
# skeleton_band=skeleton_band)
# # add to all_data
# for v in wkt_list:
# all_data.append((im_root, v))
# #all_data.append((imfile, v))
# t2 = time.time()
# logger1.info("Time to build graph: {} seconds".format(t2-t1))
#
# save to csv
df = pd.DataFrame(all_data, columns=['ImageId', 'WKT_Pix'])
df.to_csv(outfile, index=False)
    return df
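# Usage sketch (added; not part of the original source). Directory and file
# names are hypothetical. This walks every .tif in indir, builds one graph
# per image via build_wkt_imfile on a 16-process pool, and writes one
# (ImageId, WKT_Pix) row per edge to the output csv.
df_wkt = build_wkt_dir('masks/', 'solution.csv', 'skeletons/',
                       out_gdir='graphs/', thresh=0.3)
print(len(df_wkt), 'linestrings written')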
Python | def weighted_avg_and_std(values, weights):
"""
Return the weighted average and standard deviation.
values, weights -- Numpy ndarrays with the same shape.
"""
weighted_stats = DescrStatsW(values, weights=weights, ddof=0)
    mean = weighted_stats.mean  # weighted mean (equivalent to np.average(values, weights=weights))
    std = weighted_stats.std  # weighted standard deviation (ddof=0, i.e. no dof correction)
    var = weighted_stats.var  # weighted variance (ddof=0, i.e. no dof correction)
    return (mean, std, var)
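# Usage sketch (added; not part of the original source). The arrays are
# illustrative edge speeds weighted by segment length; with ddof=0 the std
# and var are the uncorrected (population) statistics.
speeds = np.array([25.0, 35.0, 45.0])
seg_lengths = np.array([100.0, 50.0, 10.0])
mean, std, var = weighted_avg_and_std(speeds, seg_lengths)
print(mean, std, var)  # mean = 29.375, dominated by the longest segment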
Python | def load_speed_conversion_dict_contin(csv_loc):
'''Load speed to burn_val conversion dataframe
and create conversion dictionary.
Assume continuous conversion'''
df_ = pd.read_csv(csv_loc, index_col=0)
# get dict of pixel value to speed
df_tmp = df_.set_index('burn_val')
dic = df_tmp.to_dict()['speed']
    return df_, dic
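# Usage sketch (added; not part of the original source). The csv path is
# hypothetical; the file is assumed to have 'burn_val' and 'speed' columns,
# so the returned dict maps a mask pixel value directly to a speed in mph.
df_conv, conv_dict = load_speed_conversion_dict_contin('speed_conversion.csv')
print(conv_dict.get(127))  # speed encoded by burn value 127, if present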
Python | def load_speed_conversion_dict_binned(csv_loc, speed_increment=5):
'''Load speed to burn_val conversion dataframe
and create conversion dictionary.
speed_increment is the increment of speed limits in mph
    10 mph bins go from 1-10, 11-20, 21-30, etc.
breakdown of speed limits in training set:
# 15.0 5143
# 18.75 6232
# 20.0 18098
# 22.5 347
# 25.0 16526
# 26.25 50
# 30.0 734
# 33.75 26
# 35.0 3583
# 41.25 16
# 45.0 2991
# 48.75 17
# 55.0 2081
# 65.0 407
    Assuming a similar distribution in the test set allows us to reuse these bins.
'''
df_ = pd.read_csv(csv_loc, index_col=0)
# get dict of channel to speed
df = df_[['channel', 'speed']]
# simple mean of speed bins
means = df.groupby(['channel']).mean().astype(int)
dic = means.to_dict()['speed']
# speeds are every 5 mph, so take the mean of the 5 mph bins
# z = [tmp for tmp in a if tmp%5==0]
# or just add increment/2 to means...
dic.update((x, y + speed_increment / 2) for x, y in dic.items())
##########
# OPTIONAL
# if using 10mph bins, update dic
dic[0] = 7.5
dic[1] = 15 # 15, 18.75, and 20 are all common
dic[2] = 18.75 # 15, 18.75, and 20 are all common
dic[3] = 20 # 15, 18.75, and 20 are all common
dic[4] = 25 # 25 mph speed limit is ubiquitous
    dic[5] = 30  # 30 mph speed limit is ubiquitous
dic[6] = 35 # 35 mph speed limit is ubiquitous
dic[7] = 45 # 45 mph speed limit is ubiquitous
dic[8] = 55 # 55 mph speed limit is ubiquitous
dic[9] = 65 # 65 mph speed limit is ubiquitous
##########
    return df_, dic
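# Usage sketch (added; not part of the original source). The csv path is
# hypothetical; the file is assumed to have 'channel' and 'speed' columns.
# With the hard-coded 10 mph bins above, channel 4 maps to 25 mph.
df_bins, bin_dict = load_speed_conversion_dict_binned('speed_bins.csv')
print(bin_dict[4])  # 25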
Python | def infer_travel_time(G_, mask, conv_dict,
min_z=128, dx=4, dy=4,
percentile=90,
use_totband=True, use_weighted_mean=True,
variable_edge_speed=False,
verbose=False):
'''Get an estimate of the average speed and travel time of each edge
in the graph from the mask and conversion dictionary
For each edge, get the geometry in pixel coords
    For each point, get the nearest neighbors in the mask and infer
    the local speed'''
mph_to_mps = 0.44704 # miles per hour to meters per second
for i, (u, v, edge_data) in enumerate(G_.edges(data=True)):
if verbose: # (i % 100) == 0:
logger1.info("\n" + str(i) + " " + str(u) + " " + str(v) + " " \
+ str(edge_data))
if (i % 1000) == 0:
logger1.info(str(i) + " / " + str(len(G_.edges())) + " edges")
tot_hours, mean_speed_mph, length_miles = \
get_edge_time_properties(mask, edge_data, conv_dict,
min_z=min_z, dx=dx, dy=dy,
percentile=percentile,
use_totband=use_totband,
use_weighted_mean=use_weighted_mean,
variable_edge_speed=variable_edge_speed,
verbose=verbose)
# update edges
edge_data['Travel Time (h)'] = tot_hours
edge_data['inferred_speed_mph'] = np.round(mean_speed_mph, 2)
edge_data['length_miles'] = length_miles
edge_data['inferred_speed_mps'] = np.round(mean_speed_mph * mph_to_mps, 2)
edge_data['travel_time_s'] = np.round(3600. * tot_hours, 3)
# edge_data['travel_time'] = np.round(3600. * tot_hours, 3)
    return G_
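# Usage sketch (added; not part of the original source). Paths are
# hypothetical; assumes the old networkx gpickle API used throughout this
# file and get_edge_time_properties defined elsewhere in this file.
G = nx.read_gpickle('graphs/AOI_2_Vegas_img1.gpickle')
mask = skimage.io.imread('masks/AOI_2_Vegas_img1.tif')
if mask.shape[0] > 20:  # channels-first, as add_travel_time_dir does below
    mask = np.moveaxis(mask, -1, 0)
_, conv_dict = load_speed_conversion_dict_contin('speed_conversion.csv')
G = infer_travel_time(G, mask, conv_dict)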
Python | def add_travel_time_dir(graph_dir, mask_dir, conv_dict, graph_dir_out,
min_z=128, dx=4, dy=4, percentile=90,
use_totband=True, use_weighted_mean=True,
variable_edge_speed=False, mask_prefix='',
save_shapefiles=True,
verbose=False):
'''Update graph properties to include travel time for entire directory'''
pickle_protocol = 4 # 4 is most recent, python 2.7 can't read 4
logger1.info("Updating graph properties to include travel time")
logger1.info(" Writing to: " + str(graph_dir_out))
os.makedirs(graph_dir_out, exist_ok=True)
image_names = sorted([z for z in os.listdir(mask_dir) if z.endswith('.tif')])
for i, image_name in enumerate(image_names):
im_root = image_name.split('.')[0]
if len(mask_prefix) > 0:
im_root = im_root.split(mask_prefix)[-1]
out_file = os.path.join(graph_dir_out, im_root + '.gpickle')
if (i % 1) == 0:
logger1.info("\n" + str(i + 1) + " / " + str(len(image_names)) + " " + image_name + " " + im_root)
mask_path = os.path.join(mask_dir, image_name)
graph_path = os.path.join(graph_dir, im_root + '.gpickle')
if not os.path.exists(graph_path):
            logger1.info(str(i) + " does not exist, skipping: " + str(graph_path))
            continue
if verbose:
logger1.info("mask_path: " + mask_path)
logger1.info("graph_path: " + graph_path)
mask = skimage.io.imread(mask_path)
if mask.shape[0] > 20:
mask = np.moveaxis(mask, -1, 0)
G_raw = nx.read_gpickle(graph_path)
# see if it's empty
if len(G_raw.nodes()) == 0:
nx.write_gpickle(G_raw, out_file, protocol=pickle_protocol)
continue
G = infer_travel_time(G_raw, mask, conv_dict,
min_z=min_z, dx=dx, dy=dy,
percentile=percentile,
use_totband=use_totband,
use_weighted_mean=use_weighted_mean,
variable_edge_speed=variable_edge_speed,
verbose=verbose)
G = G.to_undirected()
# save graph
# logger1.info("Saving graph to directory: " + graph_dir)
# out_file = os.path.join(graph_dir_out, image_name.split('.')[0] + '.gpickle')
nx.write_gpickle(G, out_file, protocol=pickle_protocol)
# save shapefile as well?
if save_shapefiles:
G_out = G # ox.simplify_graph(G.to_directed())
logger1.info("Saving shapefile to directory: {}".format(graph_dir_out))
ox.save_graph_shapefile(G_out, filename=im_root, folder=graph_dir_out,
encoding='utf-8')
# out_file2 = os.path.join(graph_dir, image_id.split('.')[0] + '.graphml')
# ox.save_graphml(G, image_id.split('.')[0] + '.graphml', folder=graph_dir)
    return
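# Usage sketch (added; not part of the original source). Directory names
# are hypothetical. For every mask/graph pair this rewrites the gpickle
# (and optionally a shapefile) with speed and travel-time edge attributes.
_, conv_dict = load_speed_conversion_dict_contin('speed_conversion.csv')
add_travel_time_dir('graphs/', 'masks/', conv_dict, 'graphs_speed/',
                    save_shapefiles=False)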
Python | def clean_sub_graphs(G_, min_length=150, max_nodes_to_skip=30,
weight='length_pix', verbose=True,
super_verbose=False):
    '''Remove subgraphs with a max path length less than min_length.
    If a subgraph has more than max_nodes_to_skip nodes, don't check its
    length (this step greatly improves processing time).'''
if len(list(G_.nodes())) == 0:
return G_
print("Running clean_sub_graphs...")
sub_graphs = list(nx.connected_component_subgraphs(G_))
bad_nodes = []
if verbose:
print(" len(G_.nodes()):", len(list(G_.nodes())))
print(" len(G_.edges()):", len(list(G_.edges())))
if super_verbose:
print("G_.nodes:", G_.nodes())
        edge_tmp = list(G_.edges())[np.random.randint(len(G_.edges()))]
        print(edge_tmp, "G.edge props:", G_.edges[edge_tmp[0], edge_tmp[1]])
for G_sub in sub_graphs:
# don't check length if too many nodes in subgraph
if len(G_sub.nodes()) > max_nodes_to_skip:
continue
else:
all_lengths = dict(nx.all_pairs_dijkstra_path_length(G_sub, weight=weight))
if super_verbose:
print(" \nGs.nodes:", G_sub.nodes())
print(" all_lengths:", all_lengths)
            # get all lengths
lens = []
# for u,v in all_lengths.iteritems():
for u in all_lengths.keys():
v = all_lengths[u]
# for uprime, vprime in v.iteritems():
for uprime in v.keys():
vprime = v[uprime]
lens.append(vprime)
if super_verbose:
print(" u, v", u, v)
print(" uprime, vprime:", uprime, vprime)
max_len = np.max(lens)
if super_verbose:
print(" Max length of path:", max_len)
if max_len < min_length:
bad_nodes.extend(G_sub.nodes())
if super_verbose:
print(" appending to bad_nodes:", G_sub.nodes())
# remove bad_nodes
G_.remove_nodes_from(bad_nodes)
if verbose:
print(" num bad_nodes:", len(bad_nodes))
# print ("bad_nodes:", bad_nodes)
print(" len(G'.nodes()):", len(G_.nodes()))
print(" len(G'.edges()):", len(G_.edges()))
if super_verbose:
print(" G_.nodes:", G_.nodes())
    return G_
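# Usage sketch (added; not part of the original source). Assumes
# networkx <= 2.3 (nx.connected_component_subgraphs). A toy graph with one
# long and one short component; only the short one is pruned.
G_toy = nx.Graph()
G_toy.add_edge(0, 1, length_pix=200)   # long component: kept
G_toy.add_edge(10, 11, length_pix=20)  # short component: removed
G_toy = clean_sub_graphs(G_toy, min_length=150, verbose=False)
print(sorted(G_toy.nodes()))  # [0, 1]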
Python | def remove_short_edges(G_, min_spur_length_m=100, verbose=True):
"""Remove unconnected edges shorter than the desired length"""
if verbose:
print("Remove shoert edges")
deg_list = list(G_.degree)
# iterate through list
bad_nodes = []
for i, (n, deg) in enumerate(deg_list):
# if verbose and (i % 500) == 0:
# print(n, deg)
# check if node has only one neighbor
if deg == 1:
# get edge
edge = list(G_.edges(n))
u, v = edge[0]
# get edge length
edge_props = G_.get_edge_data(u, v, 0)
length = edge_props['length']
# edge_props = G_.edges([u, v])
if length < min_spur_length_m:
bad_nodes.append(n)
if verbose:
print(i, "/", len(list(G_.nodes())),
"n, deg, u, v, length:", n, deg, u, v, length)
if verbose:
print("bad_nodes:", bad_nodes)
G_.remove_nodes_from(bad_nodes)
if verbose:
print("num remaining nodes:", len(list(G_.nodes())))
    return G_
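# Usage sketch (added; not part of the original source). Edges are assumed
# to carry a 'length' attribute in meters, as the function expects. The
# degree-1 endpoint of the 30 m spur is removed; the 500 m edge survives.
G_toy = nx.Graph()
G_toy.add_edge(0, 1, length=500.0)
G_toy.add_edge(1, 2, length=30.0)  # short spur hanging off node 1
G_toy = remove_short_edges(G_toy, min_spur_length_m=100, verbose=False)
print(sorted(G_toy.nodes()))  # [0, 1]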
Python | def wkt_list_to_nodes_edges(wkt_list):
'''Convert wkt list to nodes and edges
Make an edge between each node in linestring. Since one linestring
may contain multiple edges, this is the safest approach'''
    node_loc_set = set()  # set of node locations
node_loc_dic = {} # key = node idx, val = location
node_loc_dic_rev = {} # key = location, val = node idx
edge_loc_set = set() # set of edge locations
edge_dic = {} # edge properties
node_iter = 0
edge_iter = 0
for i, lstring in enumerate(wkt_list):
# get lstring properties
shape = shapely.wkt.loads(lstring)
xs, ys = shape.coords.xy
length_orig = shape.length
# iterate through coords in line to create edges between every point
for j, (x, y) in enumerate(zip(xs, ys)):
loc = (x, y)
# for first item just make node, not edge
if j == 0:
# if not yet seen, create new node
if loc not in node_loc_set:
node_loc_set.add(loc)
node_loc_dic[node_iter] = loc
node_loc_dic_rev[loc] = node_iter
node = node_iter
node_iter += 1
# if not first node in edge, retrieve previous node and build edge
else:
prev_loc = (xs[j - 1], ys[j - 1])
# print ("prev_loc:", prev_loc)
prev_node = node_loc_dic_rev[prev_loc]
# if new, create new node
if loc not in node_loc_set:
node_loc_set.add(loc)
node_loc_dic[node_iter] = loc
node_loc_dic_rev[loc] = node_iter
node = node_iter
node_iter += 1
# if seen before, retrieve node properties
else:
node = node_loc_dic_rev[loc]
# add edge, which is start_node to end_node
edge_loc = (loc, prev_loc)
edge_loc_rev = (prev_loc, loc)
# shouldn't be duplicate edges, so break if we see one
if (edge_loc in edge_loc_set) or (edge_loc_rev in edge_loc_set):
print("Oops, edge already seen, returning:", edge_loc)
return
# get distance to prev_loc and current loc
proj_prev = shape.project(Point(prev_loc))
proj = shape.project(Point(loc))
                # edge length is the difference of the two projected lengths
# along the linestring
edge_length = abs(proj - proj_prev)
# make linestring
line_out = LineString([prev_loc, loc])
line_out_wkt = line_out.wkt
edge_props = {'start': prev_node,
'start_loc_pix': prev_loc,
'end': node,
'end_loc_pix': loc,
'length_pix': edge_length,
'wkt_pix': line_out_wkt,
'geometry_pix': line_out,
'osmid': i}
# print ("edge_props", edge_props)
edge_loc_set.add(edge_loc)
edge_dic[edge_iter] = edge_props
edge_iter += 1
    return node_loc_dic, edge_dic
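# Usage sketch (added; not part of the original source). A single
# three-point linestring yields three nodes and two unit-length edges.
node_dic, edge_dic = wkt_list_to_nodes_edges(['LINESTRING (0 0, 1 0, 2 0)'])
print(len(node_dic), len(edge_dic))  # 3 2
print(edge_dic[0]['length_pix'])     # 1.0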
Python | def wkt_list_to_nodes_edges_sloppy(wkt_list):
'''Convert wkt list to nodes and edges
Assumes each linestring corresponds to a unique edge
Since this is not always the case, this function fails if a linestring
contains multiple edges'''
    node_loc_set = set()  # set of node locations
node_loc_dic = {} # key = node idx, val = location
node_loc_dic_rev = {} # key = location, val = node idx
edge_dic = {} # edge properties
node_iter = 0
edge_iter = 0
for lstring in wkt_list:
# get lstring properties
shape = shapely.wkt.loads(lstring)
x, y = shape.coords.xy
length = shape.length
# set start node
start_loc = (x[0], y[0])
# if new, create new node
if start_loc not in node_loc_set:
node_loc_set.add(start_loc)
node_loc_dic[node_iter] = start_loc
node_loc_dic_rev[start_loc] = node_iter
start_node = node_iter
node_iter += 1
# if seen before, retrieve node properties
else:
start_node = node_loc_dic_rev[start_loc]
# set end node (just like start node)
end_loc = (x[-1], y[-1])
# if new, create new node
if end_loc not in node_loc_set:
node_loc_set.add(end_loc)
node_loc_dic[node_iter] = end_loc
node_loc_dic_rev[end_loc] = node_iter
end_node = node_iter
node_iter += 1
# if seen before, retrieve node properties
else:
end_node = node_loc_dic_rev[end_loc]
# add edge, which is start_node to end_node
edge_props = {'start': start_node,
'start_loc_pix': start_loc,
'end': end_node,
'end_loc_pix': end_loc,
'length_pix': length,
'wkt_pix': lstring,
'geometry_pix': shape}
edge_dic[edge_iter] = edge_props
edge_iter += 1
    return node_loc_dic, edge_dic
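# Usage sketch (added; not part of the original source). Contrast with the
# function above: here the same linestring becomes a single edge spanning
# its endpoints, no matter how many intermediate points it has.
node_dic, edge_dic = wkt_list_to_nodes_edges_sloppy(['LINESTRING (0 0, 1 0, 2 0)'])
print(len(node_dic), len(edge_dic))  # 2 1
print(edge_dic[0]['length_pix'])     # 2.0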
Python | def shp_to_G(shp_file):
'''Ingest G from shapefile
DOES NOT APPEAR TO WORK CORRECTLY'''
G = nx.read_shp(shp_file)
    return G
Python | def convert_pix_lstring_to_geo(wkt_lstring, im_file,
utm_zone=None, utm_letter=None, verbose=False):
'''Convert linestring in pixel coords to geo coords
    If the zone or letter changes in the middle of the line, it's all screwed up, so
force zone and letter based on first point
(latitude, longitude, force_zone_number=None, force_zone_letter=None)
Or just force utm zone and letter explicitly
'''
shape = wkt_lstring # shapely.wkt.loads(lstring)
x_pixs, y_pixs = shape.coords.xy
coords_latlon = []
coords_utm = []
for i, (x, y) in enumerate(zip(x_pixs, y_pixs)):
targetSR = osr.SpatialReference()
targetSR.ImportFromEPSG(4326)
lon, lat = pixelToGeoCoord(x, y, im_file, targetSR=targetSR)
if utm_zone and utm_letter:
[utm_east, utm_north, _, _] = utm.from_latlon(lat, lon,
force_zone_number=utm_zone, force_zone_letter=utm_letter)
else:
[utm_east, utm_north, utm_zone, utm_letter] = utm.from_latlon(lat, lon)
# # If zone or letter changes inthe middle of line, it's all screwed up, so
# # force zone and letter based on first point
# if i == 0:
# [utm_east, utm_north, utm_zone, utm_letter] = utm.from_latlon(lat, lon)
# else:
# [utm_east, utm_north, _, _] = utm.from_latlon(lat, lon,
# force_zone_number=utm_zone, force_zone_letter=utm_letter)
if verbose:
print("lat lon, utm_east, utm_north, utm_zone, utm_letter]",
[lat, lon, utm_east, utm_north, utm_zone, utm_letter])
coords_utm.append([utm_east, utm_north])
coords_latlon.append([lon, lat])
lstring_latlon = LineString([Point(z) for z in coords_latlon])
lstring_utm = LineString([Point(z) for z in coords_utm])
    return lstring_latlon, lstring_utm, utm_zone, utm_letter
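# Usage sketch (added; not part of the original source). The GeoTIFF path
# is hypothetical; pixelToGeoCoord (defined elsewhere in this file) reads
# its geotransform. Forcing zone/letter keeps a line that straddles a UTM
# zone boundary in one consistent projection.
lstring_pix = LineString([(0, 0), (100, 100)])
ll_line, utm_line, zone, letter = convert_pix_lstring_to_geo(
    lstring_pix, 'PS-RGB/AOI_2_Vegas_img1.tif', utm_zone=11, utm_letter='S')
print(zone, letter, utm_line.length)  # length now in meters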
Python | def load_speed_conversion_dict_binned(csv_loc, speed_increment=5):
'''Load speed to burn_val conversion dataframe
and create conversion dictionary.
speed_increment is the increment of speed limits in mph
    10 mph bins go from 1-10, 11-20, 21-30, etc.
breakdown of speed limits in training set:
# 15.0 5143
# 18.75 6232
# 20.0 18098
# 22.5 347
# 25.0 16526
# 26.25 50
# 30.0 734
# 33.75 26
# 35.0 3583
# 41.25 16
# 45.0 2991
# 48.75 17
# 55.0 2081
# 65.0 407
    Assuming a similar distribution in the test set allows us to reuse these bins.
'''
df_ = pd.read_csv(csv_loc, index_col=0)
# get dict of channel to speed
df = df_[['channel', 'speed']]
# simple mean of speed bins
means = df.groupby(['channel']).mean().astype(int)
dic = means.to_dict()['speed']
# speeds are every 5 mph, so take the mean of the 5 mph bins
# z = [tmp for tmp in a if tmp%5==0]
# or just add increment/2 to means...
dic.update((x, y + speed_increment / 2) for x, y in dic.items())
##########
# OPTIONAL
# if using 10mph bins, update dic
dic[0] = 7.5
dic[1] = 17.5 # 15, 18.75, and 20 are all common
dic[2] = 25 # 25 mph speed limit is ubiquitous
dic[3] = 35 # 35 mph speed limit is ubiquitous
dic[4] = 45 # 45 mph speed limit is ubiquitous
dic[5] = 55 # 55 mph speed limit is ubiquitous
dic[6] = 65 # 65 mph speed limit is ubiquitous
##########
    return df_, dic
Python | def add_travel_time_dir(graph_dir, mask_dir, conv_dict, graph_dir_out,
min_z=128, dx=4, dy=4, percentile=90,
use_totband=True, use_weighted_mean=True,
variable_edge_speed=False, mask_prefix='',
save_shapefiles=True,
verbose=False):
'''Update graph properties to include travel time for entire directory'''
pickle_protocol = 4 # 4 is most recent, python 2.7 can't read 4
logger1.info("Updating graph properties to include travel time")
logger1.info(" Writing to: " + str(graph_dir_out))
os.makedirs(graph_dir_out, exist_ok=True)
image_names = sorted([z for z in os.listdir(mask_dir) if z.endswith('.tif')])
imfiles_args = [[image_name, pickle_protocol, graph_dir, mask_dir, conv_dict, graph_dir_out,
min_z, dx, dy, percentile,
use_totband, use_weighted_mean,
variable_edge_speed, mask_prefix,
save_shapefiles,
verbose] for image_name in image_names]
with multiprocessing.Pool(16) as pool:
list(pool.starmap(add_travel_time_img, imfiles_args))
# all_data = [data for cur_data in all_data for data in cur_data]
# for i, image_name in enumerate(image_names):
# im_root = image_name.split('.')[0]
# if len(mask_prefix) > 0:
# im_root = im_root.split(mask_prefix)[-1]
# out_file = os.path.join(graph_dir_out, im_root + '.gpickle')
#
# if (i % 1) == 0:
# logger1.info("\n" + str(i + 1) + " / " + str(len(image_names)) + " " + image_name + " " + im_root)
# mask_path = os.path.join(mask_dir, image_name)
# graph_path = os.path.join(graph_dir, im_root + '.gpickle')
#
# if not os.path.exists(graph_path):
# logger1.info(" ", i, "DNE, skipping: " + str(graph_path))
# return
# # continue
#
# if verbose:
# logger1.info("mask_path: " + mask_path)
# logger1.info("graph_path: " + graph_path)
#
# mask = skimage.io.imread(mask_path)
# G_raw = nx.read_gpickle(graph_path)
#
# # see if it's empty
# if len(G_raw.nodes()) == 0:
# nx.write_gpickle(G_raw, out_file, protocol=pickle_protocol)
# continue
#
# G = infer_travel_time(G_raw, mask, conv_dict,
# min_z=min_z, dx=dx, dy=dy,
# percentile=percentile,
# use_totband=use_totband,
# use_weighted_mean=use_weighted_mean,
# variable_edge_speed=variable_edge_speed,
# verbose=verbose)
# G = G.to_undirected()
# # save graph
# # logger1.info("Saving graph to directory: " + graph_dir)
# # out_file = os.path.join(graph_dir_out, image_name.split('.')[0] + '.gpickle')
# nx.write_gpickle(G, out_file, protocol=pickle_protocol)
#
# # save shapefile as well?
# if save_shapefiles:
# G_out = G # ox.simplify_graph(G.to_directed())
# logger1.info("Saving shapefile to directory: {}".format(graph_dir_out))
# ox.save_graph_shapefile(G_out, filename=im_root, folder=graph_dir_out,
# encoding='utf-8')
# # out_file2 = os.path.join(graph_dir, image_id.split('.')[0] + '.graphml')
# # ox.save_graphml(G, image_id.split('.')[0] + '.graphml', folder=graph_dir)
    return
Python | def clean_sub_graphs(G_, min_length=150, max_nodes_to_skip=100,
weight='length_pix', verbose=True,
super_verbose=False):
    '''Remove subgraphs with a max path length less than min_length.
    If a subgraph has more than max_nodes_to_skip nodes, don't check its
    length (this step greatly improves processing time).'''
if len(G_.nodes()) == 0:
return G_
# print ("Running clean_sub_graphs...")
sub_graphs = list(nx.connected_component_subgraphs(G_))
bad_nodes = []
for G_sub in sub_graphs:
# don't check length if too many nodes in subgraph
if len(G_sub.nodes()) > max_nodes_to_skip:
continue
else:
all_lengths = dict(nx.all_pairs_dijkstra_path_length(G_sub, weight=weight))
            # get all lengths
lens = []
#for u,v in all_lengths.iteritems():
for u in all_lengths.keys():
v = all_lengths[u]
#for uprime, vprime in v.iteritems():
for uprime in v.keys():
vprime = v[uprime]
lens.append(vprime)
max_len = np.max(lens)
if max_len < min_length:
bad_nodes.extend(G_sub.nodes())
# remove bad_nodes
G_.remove_nodes_from(bad_nodes)
    return G_
Python | def dl_post_process_pred(mask, glob_thresh=80, kernel_size=9,
min_area=2000, contour_smoothing=0.001,
adapt_kernel=85, adapt_const=-3,
outplot_file='', dpi=500, use_glob_thresh=False,
kernel_open=19, verbose=False):
'''Refine mask file and return both refined mask and skeleton'''
t0 = time.time()
kernel_blur = kernel_size #9
kernel_close = kernel_size #9
#kernel_open = kernel_size #9
kernel_close = np.ones((kernel_close,kernel_close), np.uint8)
kernel_open = np.ones((kernel_open, kernel_open), np.uint8)
blur = cv2.medianBlur(mask, kernel_blur)
# global thresh
glob_thresh_arr = cv2.threshold(blur, glob_thresh, 1, cv2.THRESH_BINARY)[1]
glob_thresh_arr_smooth = cv2.medianBlur(glob_thresh_arr, kernel_blur)
t1 = time.time()
# print ("Time to compute open(), close(), and get thresholds:", t1-t0, "seconds")
if use_glob_thresh:
mask_thresh = glob_thresh_arr_smooth
else:
adapt_thresh = cv2.adaptiveThreshold(mask,1,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\
cv2.THRESH_BINARY,adapt_kernel, adapt_const)
# resmooth
adapt_thresh_smooth = cv2.medianBlur(adapt_thresh, kernel_blur)
mask_thresh = adapt_thresh_smooth
closing = cv2.morphologyEx(mask_thresh, cv2.MORPH_CLOSE, kernel_close)
opening = cv2.morphologyEx(closing, cv2.MORPH_OPEN, kernel_open)
# try on bgRemoved?
t2 = time.time()
# set output
if contour_smoothing < 0:
final_mask = opening
else:
# contours
# remove small items
contours, cont_plot, hole_idxs = get_contours_complex(opening,
min_thresh=glob_thresh,
min_area=min_area,
contour_smoothing=contour_smoothing)
# for some reason contours don't extend to the edge, so clip the edge
# and resize
mask_filt_raw = get_mask(mask_thresh, cont_plot, hole_idxs=hole_idxs)
shape_tmp = mask_filt_raw.shape
mask_filt1 = 200 * cv2.resize(mask_filt_raw[2:-2, 2:-2], shape_tmp).astype(np.uint8)
# thresh and resmooth
mask_filt = cv2.GaussianBlur(mask_filt1, (kernel_blur, kernel_blur), 0)
#mask_filt = cv2.threshold(mask_filt2, glob_thresh, 1, cv2.THRESH_BINARY)
final_mask = mask_filt
t3 = time.time()
# print ("Time to smooth contours:", t3-t2, "seconds")
# skeletonize
#medial = medial_axis(final_mask)
#medial_int = medial.astype(np.uint8)
medial_int = medial_axis(final_mask).astype(np.uint8)
# print ("Time to compute medial_axis:", time.time() - t3, "seconds")
# print ("Time to run dl_post_process_pred():", time.time() - t0, "seconds")
    return final_mask, medial_int
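# Usage sketch (added; not part of the original source). The input is a
# synthetic uint8 probability map; real inputs would come from a model.
# contour_smoothing=-1 skips the contour step, so the helpers
# get_contours_complex/get_mask (defined elsewhere in this file) are not
# needed for this example.
prob_mask = np.zeros((256, 256), np.uint8)
prob_mask[100:140, :] = 255  # a horizontal "road"
final_mask, skel = dl_post_process_pred(prob_mask, use_glob_thresh=True,
                                        contour_smoothing=-1)
print(final_mask.shape, int(skel.sum()))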
Python | def make_skeleton(img_loc,
thresh,
debug,
fix_borders,
replicate=5,
clip=2,
img_mult=255,
hole_size=300,
cv2_kernel_close=7,
cv2_kernel_open=7,
use_medial_axis=False,
num_classes=1,
skeleton_band='all'):
'''
Extract a skeleton from a mask.
skeleton_band is the index of the band of the mask to use for
    skeleton extraction; set to the string 'all' to use all bands
'''
# print ("Executing make_skeleton...")
t0 = time.time()
#replicate = 5
#clip = 2
rec = replicate + clip
# read in data
if num_classes == 1:
try:
img = cv2.imread(img_loc, cv2.IMREAD_GRAYSCALE)
        except Exception:
img = skimage.io.imread(img_loc, as_gray=True).astype(np.uint8)#[::-1]
else:
# ensure 8bit?
img_tmp = skimage.io.imread(img_loc).astype(np.uint8)
#img_tmp = skimage.io.imread(img_loc)
# we want skimage to read in (channels, h, w) for multi-channel
# assume less than 20 channels
if img_tmp.shape[0] > 20:
img_full = np.moveaxis(img_tmp, 0, -1)
else:
img_full = img_tmp
# select the desired band for skeleton extraction
        # if skeleton_band is the string 'all', sum all bands
        if isinstance(skeleton_band, str):
img = np.sum(img_full, axis=0).astype(np.int8)
else:
img = img_full[skeleton_band, :, :]
# potentially keep only subset of data
shape0 = img.shape
if fix_borders:
img = cv2.copyMakeBorder(img, replicate, replicate, replicate,
replicate, cv2.BORDER_REPLICATE)
img_copy = None
if debug:
if fix_borders:
img_copy = np.copy(img[replicate:-replicate,replicate:-replicate])
else:
img_copy = np.copy(img)
t1 = time.time()
img = preprocess(img, thresh, img_mult=img_mult, hole_size=hole_size,
cv2_kernel_close=cv2_kernel_close,
cv2_kernel_open=cv2_kernel_open)
t2 = time.time()
if not np.any(img):
return None, None
if not use_medial_axis:
ske = skeletonize(img).astype(np.uint16)
t3 = time.time()
else:
ske = skimage.morphology.medial_axis(img).astype(np.uint16)
t3 = time.time()
if fix_borders:
ske = ske[rec:-rec, rec:-rec]
ske = cv2.copyMakeBorder(ske, clip, clip, clip, clip, cv2.BORDER_CONSTANT, value=0)
t4 = time.time()
t1 = time.time()
    return img, ske
Python | def build_wkt_dir(indir, outfile, out_ske_dir, out_gdir='', thresh=0.3,
im_prefix='',
debug=False, add_small=True, fix_borders=True,
skel_replicate=5, skel_clip=2,
img_mult=255,
hole_size=300, cv2_kernel_close=7, cv2_kernel_open=7,
min_subgraph_length_pix=50,
spacenet_naming_convention=False,
num_classes=1,
skeleton_band='all'):
    '''Execute build_graph_wkt for an entire folder.
    Split the image name on AOI and keep only the name after AOI; this is
    necessary for scoring.'''
all_data = []
im_files = np.sort([z for z in os.listdir(indir) if z.endswith('.tif')])
nfiles = len(im_files)
print(indir, nfiles)
args_list = []
for i, imfile in tqdm.tqdm(enumerate(im_files), total=nfiles):
args = (
imfile,
im_prefix,
indir,
spacenet_naming_convention,
out_ske_dir,
out_gdir,
thresh,
debug,
add_small,
fix_borders,
skel_replicate,
skel_clip,
img_mult,
hole_size,
cv2_kernel_close,
cv2_kernel_open,
min_subgraph_length_pix,
num_classes,
skeleton_band,
)
args_list.append(args)
with Pool(cpu_count()) as p:
data = list(tqdm.tqdm(
iterable=p.imap_unordered(_build_graph_wkt_iterable, args_list),
total=len(args_list)))
for im_root, wkt_list in sorted(data):
for v in wkt_list:
all_data.append((im_root, v))
# save to csv
df = pd.DataFrame(all_data, columns=['ImageId', 'WKT_Pix'])
df.to_csv(outfile, index=False)
    return df
Python | def efficientnet_params(model_name):
""" Map EfficientNet model name to parameter coefficients. """
params_dict = {
# Coefficients: width,depth,res,dropout
'efficientnet_b0': (1.0, 1.0, 224, 0.2),
'efficientnet_b1': (1.0, 1.1, 240, 0.2),
'efficientnet_b2': (1.1, 1.2, 260, 0.3),
'efficientnet_b3': (1.2, 1.4, 300, 0.3),
'efficientnet_b4': (1.4, 1.8, 380, 0.4),
'efficientnet_b5': (1.6, 2.2, 456, 0.4),
'efficientnet_b6': (1.8, 2.6, 528, 0.5),
'efficientnet_b7': (2.0, 3.1, 600, 0.5),
}
    return params_dict[model_name]
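# Quick demo of how the compound-scaling coefficients are typically consumed
# (the baseline values and rounding here are illustrative, not from this source):
import math

width_c, depth_c, res, dropout = efficientnet_params('efficientnet_b3')
base_filters, base_repeats = 32, 4
scaled_filters = int(base_filters * width_c)             # widen each stage
scaled_repeats = int(math.ceil(base_repeats * depth_c))  # deepen each stage
print(scaled_filters, scaled_repeats, res, dropout)      # 38 6 300 0.3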
Python | def extract_features(self, inputs):
""" Returns output of the final convolution layer """
# Stem
x = relu_fn(self._bn0(self._conv_stem(inputs)))
    # print(x.size())  # debug output
# Blocks
for idx, block in enumerate(self._blocks):
drop_connect_rate = self._global_params.drop_connect_rate
if drop_connect_rate:
drop_connect_rate *= float(idx) / len(self._blocks)
x = block(x) # , drop_connect_rate) # see https://github.com/tensorflow/tpu/issues/381
        # print(idx, x.size())  # debug output
    return x
Python | def forward(self, inputs):
""" Calls extract_features to extract features,
applies final linear layer, and returns logits.
"""
# Convolution layers
x = self.extract_features(inputs)
# Head
x = relu_fn(self._bn1(self._conv_head(x)))
x = F.adaptive_avg_pool2d(x, 1).squeeze(-1).squeeze(-1)
if self._dropout:
x = F.dropout(x, p=self._dropout, training=self.training)
x = self._fc(x)
    return x
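# Toy shape-check of the head logic above (adaptive-average-pool, flatten,
# then linear), using a dummy feature map and a stand-in for self._fc:
import torch
import torch.nn.functional as F

feat = torch.randn(2, 1280, 7, 7)                 # (batch, C, H, W)
pooled = F.adaptive_avg_pool2d(feat, 1).squeeze(-1).squeeze(-1)
print(pooled.shape)                               # torch.Size([2, 1280])
logits = torch.nn.Linear(1280, 1000)(pooled)      # stand-in for self._fc
print(logits.shape)                               # torch.Size([2, 1000])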
Python | def _check_model_name_is_valid(cls, model_name, also_need_pretrained_weights=False):
""" Validates model name. None that pretrained weights are only available for
the first four models (efficientnet-b{i} for i in 0,1,2,3) at the moment. """
num_models = 4 if also_need_pretrained_weights else 8
valid_models = ['efficientnet_b' + str(i) for i in range(num_models)]
if model_name.replace('-', '_') not in valid_models:
        raise ValueError('model_name should be one of: ' + ', '.join(valid_models))
Python | def efficientnet_b0(pretrained=False, progress=True, **kwargs):
"""Constructs a EfficientNet-B0 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
    return _efficientnet('efficientnet_b0', pretrained, progress, **kwargs)
Python | def efficientnet_b1(pretrained=False, progress=True, **kwargs):
"""Constructs a EfficientNet-B1 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
    return _efficientnet('efficientnet_b1', pretrained, progress, **kwargs)
Python | def efficientnet_b2(pretrained=False, progress=True, **kwargs):
"""Constructs a EfficientNet-B2 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
    return _efficientnet('efficientnet_b2', pretrained, progress, **kwargs)
Python | def efficientnet_b3(pretrained=False, progress=True, **kwargs):
"""Constructs a EfficientNet-B3 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
    return _efficientnet('efficientnet_b3', pretrained, progress, **kwargs)
Python | def efficientnet_b4(progress=True, **kwargs):
"""Constructs a EfficientNet-B4 model.
Args:
progress (bool): If True, displays a progress bar of the download to stderr
"""
    return _efficientnet('efficientnet_b4', False, progress, **kwargs)
Python | def efficientnet_b5(progress=True, **kwargs):
"""Constructs a EfficientNet-B5 model.
Args:
progress (bool): If True, displays a progress bar of the download to stderr
"""
    return _efficientnet('efficientnet_b5', False, progress, **kwargs)
Python | def efficientnet_b6(progress=True, **kwargs):
"""Constructs a EfficientNet-B6 model.
Args:
progress (bool): If True, displays a progress bar of the download to stderr
"""
    return _efficientnet('efficientnet_b6', False, progress, **kwargs)
Python | def efficientnet_b7(progress=True, **kwargs):
"""Constructs a EfficientNet-B7 model.
Args:
progress (bool): If True, displays a progress bar of the download to stderr
"""
    return _efficientnet('efficientnet_b7', False, progress, **kwargs)
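# Usage sketch for the constructors above (assumes the _efficientnet factory
# defined elsewhere in this codebase; resolutions come from efficientnet_params):
# model = efficientnet_b0(pretrained=True)   # b0-b3 have pretrained weights
# model = efficientnet_b5()                  # b4-b7: random init only
# model.eval()
# with torch.no_grad():
#     logits = model(torch.randn(1, 3, 456, 456))  # b5 expects res 456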
Python | def convert_pix_lstring_to_geo(wkt_lstring, im_file,
utm_zone=None, utm_letter=None, verbose=False):
    '''Convert a linestring in pixel coords to geo coords.
    If the UTM zone or letter changes in the middle of the line, everything
    downstream breaks, so force the zone and letter based on the first point
    (utm.from_latlon(latitude, longitude, force_zone_number=None,
    force_zone_letter=None)), or just force the utm zone and letter explicitly.
    '''
shape = wkt_lstring #shapely.wkt.loads(lstring)
x_pixs, y_pixs = shape.coords.xy
coords_latlon = []
coords_utm = []
    # the spatial reference is constant, so build it once outside the loop
    targetSR = osr.SpatialReference()
    targetSR.ImportFromEPSG(4326)
    for (x, y) in zip(x_pixs, y_pixs):
        lon, lat = pixelToGeoCoord(x, y, im_file, targetSR=targetSR)
if utm_zone and utm_letter:
[utm_east, utm_north, _, _] = utm.from_latlon(lat, lon,
force_zone_number=utm_zone, force_zone_letter=utm_letter)
else:
[utm_east, utm_north, utm_zone, utm_letter] = utm.from_latlon(lat, lon)
if verbose:
print("lat lon, utm_east, utm_north, utm_zone, utm_letter]",
[lat, lon, utm_east, utm_north, utm_zone, utm_letter])
coords_utm.append([utm_east, utm_north])
coords_latlon.append([lon, lat])
lstring_latlon = LineString([Point(z) for z in coords_latlon])
lstring_utm = LineString([Point(z) for z in coords_utm])
    return lstring_latlon, lstring_utm, utm_zone, utm_letter
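# Minimal demo of the zone-forcing idea above, using only the utm package
# (force_zone_letter is an assumption about recent utm versions): without
# forcing, points on either side of a zone boundary land in different zones
# and their eastings are not comparable.
import utm

lat, lon0, lon1 = 37.0, 5.9, 6.1  # straddles the zone 31/32 boundary at 6E
e0, n0, z0, l0 = utm.from_latlon(lat, lon0)
e1, n1, z1, l1 = utm.from_latlon(lat, lon1, force_zone_number=z0,
                                 force_zone_letter=l0)
print(z0, l0, z1, l1)  # both reported in zone 31, so e0/e1 are comparable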
Python | def post_process_image(df_pos_, data_dir, num_classes=1, im_prefix='',
super_verbose=False):
'''
For a dataframe of image positions (df_pos_), and the tiles of that image,
reconstruct the image. Image can be a single band mask, a 3-band image, a
multiband mask, or a multiband image. Adapted from basiss.py
Assume that only one image root is in df_pos_
'''
    # to avoid saturating overlapping images we could rescale by a factor
    # of 4; with rescale_factor = 1 we instead rely on np.uint16 for mask_raw
    rescale_factor = 1
# get image width and height
w, h = df_pos_['im_x'].values[0], df_pos_['im_y'].values[0]
if num_classes == 1:
# create numpy zeros of appropriate shape
#mask_raw = np.zeros((h,w), dtype=np.uint8) # dtype=np.uint16)
mask_raw = np.zeros((h,w), dtype=np.uint16)
        # create another zero array to record which pixels are overlaid
mask_norm = np.zeros((h,w), dtype=np.uint8) # dtype=np.uint16)
else:
# create numpy zeros of appropriate shape
#mask_raw = np.zeros((h,w), dtype=np.uint8) # dtype=np.uint16)
mask_raw = np.zeros((h,w, num_classes), dtype=np.uint16)
        # create another zero array to record which pixels are overlaid
mask_norm = np.zeros((h,w, num_classes), dtype=np.uint8) # dtype=np.uint16)
overlay_count = np.zeros((h,w), dtype=np.uint8)
# iterate through slices
for i, (idx_tmp, item) in enumerate(df_pos_.iterrows()):
if (i % 50) == 0:
print (i, "/", len(df_pos_))
#print (i, "\n", idx_tmp, "\n", item)
[row_val, idx, name, name_full, xmin, ymin, slice_x, slice_y, im_x, im_y] = item
# add prefix, if required
if len(im_prefix) > 0:
name = im_prefix + name
name_full = im_prefix + name_full
if num_classes == 1:
mask_slice_refine = cv2.imread(os.path.join(data_dir, name_full), 0)
elif num_classes == 3:
mask_slice_refine = cv2.imread(os.path.join(data_dir, name_full), 1)
else:
t01 = time.time()
# mask_slice_refine = gdal.Open(os.path.join(data_dir, name_full)).ReadAsArray()
# # make sure image is h,w,channels (assume less than 20 channels)
# if (len(mask_slice_refine.shape) == 3) and (mask_slice_refine.shape[0] < 20):
# mask_slice_refine = np.moveaxis(mask_slice_refine, 0, -1)
# gdal is much faster, just needs some work !!!!
# skimage
plugin = 'tifffile'
mask_slice_refine = skimage.io.imread(os.path.join(data_dir, name_full), plugin=plugin)
            # skimage may read multi-channel data as (channels, h, w);
            # reorder to (h, w, channels) (assume fewer than 20 channels)
            if mask_slice_refine.shape[0] <= 20:
                mask_slice_refine = np.moveaxis(mask_slice_refine, 0, -1)
#print ("mask_slice_refine.shape:", mask_slice_refine.shape)
#print ("Time to read image:", time.time() - t01, "seconds")
        # rescale mask slice, if desired
if rescale_factor != 1:
mask_slice_refine = (mask_slice_refine / rescale_factor).astype(np.uint8)
#print ("mask_slice_refine:", mask_slice_refine)
if super_verbose:
print ("item:", item)
x0, x1 = xmin, xmin + slice_x
y0, y1 = ymin, ymin + slice_y
if num_classes == 1:
# add mask to mask_raw
mask_raw[y0:y1, x0:x1] += mask_slice_refine
else:
# add mask to mask_raw
mask_raw[y0:y1, x0:x1, :] += mask_slice_refine
# per channel
#for c in range(num_classes):
# mask_raw[y0:y1, x0:x1, c] += mask_slice_refine[:,:,c]
# update count
overlay_count[y0:y1, x0:x1] += np.ones((slice_y, slice_x), dtype=np.uint8)
# compute normalized mask
# if overlay_count == 0, reset to 1
overlay_count[np.where(overlay_count == 0)] = 1
if rescale_factor != 1:
mask_raw = mask_raw.astype(np.uint8)
#print ("np.max(overlay_count):", np.max(overlay_count))
#print ("np.min(overlay_count):", np.min(overlay_count))
# throws a memory error if using np.divide...
if (w < 60000) and (h < 60000):
if num_classes == 1:
mask_norm = np.divide(mask_raw, overlay_count).astype(np.uint8)
else:
for j in range(num_classes):
mask_norm[:,:,j] = np.divide(mask_raw[:,:,j], overlay_count).astype(np.uint8)
else:
for j in range(h):
#print ("j:", j)
mask_norm[j] = (mask_raw[j] / overlay_count[j]).astype(np.uint8)
# rescale mask_norm
if rescale_factor != 1:
mask_norm = (mask_norm * rescale_factor).astype(np.uint8)
#print ("mask_norm.shape:", mask_norm.shape)
#print ("mask_norm.dtype:", mask_norm.dtype)
    return name, mask_norm, mask_raw, overlay_count
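# Self-contained toy of the accumulate-then-normalize scheme above: two
# overlapping tiles are summed into a wide-dtype raster plus a per-pixel
# hit counter, then divided to recover the mean in the overlap region.
import numpy as np

raw = np.zeros((4, 6), dtype=np.uint16)
count = np.zeros((4, 6), dtype=np.uint8)
tile = 100 * np.ones((4, 4), dtype=np.uint16)
for x0 in (0, 2):                       # the two tiles overlap on columns 2-3
    raw[:, x0:x0 + 4] += tile
    count[:, x0:x0 + 4] += 1
count[count == 0] = 1                   # avoid divide-by-zero where no tile landed
norm = np.divide(raw, count).astype(np.uint8)
print(norm)                             # 100 everywhere a tile landed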
Python | def post_process_image_3band(df_pos_, data_dir, n_bands=3, im_prefix='',
super_verbose=False):
'''
For a dataframe of image positions (df_pos_), and the tiles of that image,
reconstruct the image. Adapted from basiss.py
Assume that only one image root is in df_pos_
'''
    # to avoid saturating overlapping images we could rescale by a factor
    # of 4; with rescale_factor = 1 we instead rely on np.uint16 for im_raw
    rescale_factor = 1
# get image width and height
w, h = df_pos_['im_x'].values[0], df_pos_['im_y'].values[0]
# create numpy zeros of appropriate shape
#im_raw = np.zeros((h,w), dtype=np.uint8) # dtype=np.uint16)
im_raw = np.zeros((h,w,n_bands), dtype=np.uint16)
    # create another zero array to record which pixels are overlaid
im_norm = np.zeros((h,w,n_bands), dtype=np.uint8) # dtype=np.uint16)
overlay_count = np.zeros((h,w), dtype=np.uint8)
# iterate through slices
for i, (idx_tmp, item) in enumerate(df_pos_.iterrows()):
if (i % 100) == 0:
print (i, "/", len(df_pos_))
#print (i, "\n", idx_tmp, "\n", item)
[row_val, idx, name, name_full, xmin, ymin, slice_x, slice_y, im_x, im_y] = item
if len(im_prefix) > 0:
name = im_prefix + name
name_full = im_prefix + name_full
# read in image
if n_bands == 3:
im_slice_refine = cv2.imread(os.path.join(data_dir, name_full), 1)
else:
print ("Still need to write code to handle multispecral data...")
return
# rescale make slice?
if rescale_factor != 1:
im_slice_refine = (im_slice_refine / rescale_factor).astype(np.uint8)
#print ("im_slice_refine:", im_slice_refine)
if super_verbose:
print ("item:", item)
x0, x1 = xmin, xmin + slice_x
y0, y1 = ymin, ymin + slice_y
#print ("x0, x1, y0, y1,", x0, x1, y0, y1)
# add data to im_raw for each band
for j in range(n_bands):
im_raw[y0:y1, x0:x1, j] += im_slice_refine[:,:,j]
# update count
overlay_count[y0:y1, x0:x1] += np.ones((slice_y, slice_x), dtype=np.uint8)
# compute normalized im
# if overlay_count == 0, reset to 1
overlay_count[np.where(overlay_count == 0)] = 1
if rescale_factor != 1:
im_raw = im_raw.astype(np.uint8)
#print ("np.max(overlay_count):", np.max(overlay_count))
#print ("np.min(overlay_count):", np.min(overlay_count))
# throws a memory error if using np.divide...
if h < 60000:
for j in range(n_bands):
im_norm[:,:,j] = np.divide(im_raw[:,:,j], overlay_count).astype(np.uint8)
else:
for j in range(h):
#print ("j:", j)
im_norm[j] = (im_raw[j] / overlay_count[j]).astype(np.uint8)
# rescale im_norm
if rescale_factor != 1:
im_norm = (im_norm * rescale_factor).astype(np.uint8)
#print ("im_norm.shape:", im_norm.shape)
#print ("im_norm.dtype:", im_norm.dtype)
    return name, im_norm, im_raw, overlay_count
Python | def clipped(func):
"""
wrapper to clip results of transform to image dtype value range
"""
@wraps(func)
def wrapped_function(img, *args, **kwargs):
dtype, maxval = img.dtype, np.max(img)
return clip(func(img, *args, **kwargs), dtype, maxval)
    return wrapped_function
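# Sketch of how the decorator above is meant to be used; the clip() helper it
# calls is not shown in this excerpt, so a minimal stand-in is assumed here.
# Note the decorator clips to np.max(img), i.e. the input's observed range.
import numpy as np

def clip(img, dtype, maxval):  # assumed stand-in for the real helper
    return np.clip(img, 0, maxval).astype(dtype)

@clipped
def shift_brightness(img, delta):
    return img.astype(np.int32) + delta  # may exceed the range without clipping

out = shift_brightness(np.array([250, 10], dtype=np.uint8), 20)
print(out)  # [250  30] -- clipped back to the input image's max value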
Python | def fix_shift_values(img, *args):
"""
shift values are normally specified in uint, but if your data is float - you need to remap values
"""
if img.dtype == np.float32:
return list(map(lambda x: x / 255, args))
    return args
Python | def apply(self, img, **params):
"""
override this method with transform you need to apply
"""
    raise NotImplementedError
Python | def mixup_data(x, y, alpha=0.2, use_cuda=True):
"""Returns mixed inputs, pairs of targets, and lambda"""
if alpha > 0:
lam = np.random.beta(alpha, alpha)
else:
lam = 1
batch_size = x.size()[0]
if use_cuda:
index = torch.randperm(batch_size).cuda()
else:
index = torch.randperm(batch_size)
mixed_x = lam * x + (1 - lam) * x[index, :]
y_a, y_b = y, y[index]
    return mixed_x, y_a, y_b, lam
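# Typical training-loop pairing for mixup_data above: interpolate the loss
# with the same lambda used to mix the inputs (a CPU-only sketch; the tiny
# model here is illustrative, not from the source):
import torch
import torch.nn as nn

x = torch.randn(8, 3, 32, 32)
y = torch.randint(0, 10, (8,))
mixed_x, y_a, y_b, lam = mixup_data(x, y, alpha=0.2, use_cuda=False)
model = nn.Sequential(nn.Flatten(), nn.Linear(3 * 32 * 32, 10))
criterion = nn.CrossEntropyLoss()
pred = model(mixed_x)
loss = lam * criterion(pred, y_a) + (1 - lam) * criterion(pred, y_b)
print(float(loss))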
Python | def resnext101_32x8d_wsl(progress=True, pretrained=True, **kwargs):
"""Constructs a ResNeXt-101 32x8 model pre-trained on weakly-supervised data
and finetuned on ImageNet from Figure 5 in
`"Exploring the Limits of Weakly Supervised Pretraining" <https://arxiv.org/abs/1805.00932>`_
Args:
progress (bool): If True, displays a progress bar of the download to stderr.
"""
kwargs['groups'] = 32
kwargs['width_per_group'] = 8
    # pass the pretrained flag through instead of hard-coding True, so the
    # parameter actually has an effect
    return _resnext('resnext101_32x8d', Bottleneck, [3, 4, 23, 3], pretrained, progress, **kwargs)
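# The upstream WSL models are normally fetched via torch.hub, which mirrors
# the entry point above (network access required):
# model = torch.hub.load('facebookresearch/WSL-Images', 'resnext101_32x8d_wsl')
# model.eval()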
Python | def create_masks(path_data, buffer_meters=2, n_bands=3,
burnValue=150, make_plots=True, overwrite_ims=False,
output_df_file='',
header=['name', 'im_file', 'im_vis_file', 'mask_file',
'mask_vis_file']):
'''
Create masks from files in path_data.
Write 8bit images and masks to file.
Return a dataframe of file locations with the following columns:
['name', 'im_file', 'im_vis_file', 'mask_file', 'mask_vis_file']
We record locations of im_vis_file and mask_vis_file in case im_file
or mask_file is not 8-bit or has n_channels != [1,3]
if using 8band data, the RGB-PanSharpen_8bit should already exist, so
3band should be run prior to 8band
'''
t0 = time.time()
# set paths
path_labels = os.path.join(path_data, 'geojson/spacenetroads')
# output directories
path_masks = os.path.join(path_data, 'masks_' + str(buffer_meters) + 'm')
path_masks_plot = os.path.join(
path_data, 'masks_' + str(buffer_meters) + 'm_plots')
# image directories
path_images_vis = os.path.join(path_data, 'RGB-PanSharpen_8bit')
if n_bands == 3:
path_images_raw = os.path.join(path_data, 'PS-RGB')
path_images_8bit = os.path.join(path_data, 'RGB-PanSharpen_8bit')
else:
path_images_raw = os.path.join(path_data, 'MUL-PanSharpen')
path_images_8bit = os.path.join(path_data, 'MUL-PanSharpen_8bit')
if not os.path.exists(path_images_vis):
print("Need to run 3band prior to 8band!")
return
# create directories
for d in [path_images_8bit, path_masks, path_masks_plot]:
if not os.path.exists(d):
os.mkdir(d)
# iterate through images, convert to 8-bit, and create masks
outfile_list = []
im_files = os.listdir(path_images_raw)
nfiles = len(im_files)
for i, im_name in enumerate(im_files):
if not im_name.endswith('.tif'):
continue
# define files
name_root = 'AOI' + im_name.split('AOI')[1].split('.')[0]
im_file_raw = os.path.join(path_images_raw, im_name)
im_file_out = os.path.join(path_images_8bit, im_name)
im_file_out_vis = im_file_out.replace('MUL', 'RGB')
# get visible file (if using 8band imagery we want the 3band file
# for plotting purposes)
# if n_bands == 3:
# im_file_out_vis = im_file_out
# else:
# name_vis = im_name.replace('MUL', 'RGB')
# im_file_out_vis = os.path.join(path_images_vis, name_vis)
# convert to 8bit, if desired
if not os.path.exists(im_file_out) or overwrite_ims:
apls_utils.convertTo8Bit(im_file_raw, im_file_out,
outputPixType='Byte',
outputFormat='GTiff',
rescale_type='rescale',
percentiles=[2, 98])
# determine output files
        # label_file = 'spacenetroads_AOI_2_Vegas_' + name_root + '.geojson'
        # build the bare filename first, then join with path_labels once
        # (joining an already-joined path would duplicate the directory)
        label_file = 'spacenetroads_' + name_root + '.geojson'
        label_file_tot = os.path.join(path_labels, label_file)
mask_file = os.path.join(path_masks, name_root + '.png')
if make_plots:
plot_file = os.path.join(path_masks_plot, name_root + '.png')
else:
plot_file = ''
print("\n", i + 1, "/", nfiles)
print(" im_name:", im_name)
print(" name_root:", name_root)
print(" im_file_out:", im_file_out)
print(" mask_file:", mask_file)
print(" output_plot_file:", plot_file)
# create masks
if not os.path.exists(mask_file) or overwrite_ims:
mask, gdf_buffer = apls_utils._get_road_buffer(label_file_tot,
im_file_out_vis,
mask_file,
buffer_meters=buffer_meters,
burnValue=burnValue,
# bufferRoundness=6,
plot_file=plot_file,
figsize=(6, 6),
fontsize=8,
dpi=500,
show_plot=False,
verbose=False)
# resize in ingest so we don't have to save the very large arrays
outfile_list.append([im_name, im_file_out, im_file_out_vis,
mask_file, mask_file])
# make dataframe and save
df = pd.DataFrame(outfile_list, columns=header)
if len(output_df_file) > 0:
df.to_csv(output_df_file, index=False)
print("\ndf.ix[0]:", df.ix[0])
print("\nTotal data length:", len(df))
t4 = time.time()
print("Time to run create_masks():", t4 - t0, "seconds")
    return df
Python | def pkl_dir_to_wkt(pkl_dir, output_csv_path='',
weight_keys=['length', 'travel_time_s'],
verbose=False):
"""
Create submission wkt from directory full of graph pickles
"""
    pkl_list = sorted([z for z in os.listdir(pkl_dir) if z.endswith('.gpickle')])
    with multiprocessing.Pool(16) as pool:
        wkt_list = list(pool.map(process_pkl, pkl_list))
    # flatten the per-image row lists into a single list
    wkt_list = [data for cur_data in wkt_list for data in cur_data]
    # (the commented-out serial implementation of this loop was superseded
    # by the process_pkl worker used above)
if verbose:
print("wkt_list:", wkt_list)
# create dataframe
if len(weight_keys) > 0:
cols = ['ImageId', 'WKT_Pix'] + weight_keys
else:
cols = ['ImageId', 'WKT_Pix']
# use 'length_m' and 'travel_time_s' instead?
cols_new = []
for z in cols:
if z == 'length':
cols_new.append('length_m')
elif z == 'travel_time':
cols_new.append('travel_time_s')
else:
cols_new.append(z)
cols = cols_new
print("cols:", cols)
df = pd.DataFrame(wkt_list, columns=cols)
print("df:", df)
# save
if len(output_csv_path) > 0:
df.to_csv(output_csv_path, index=False)
    return df
Python | def infer_travel_time(G_,
mask,
conv_dict,
min_z=128,
dx=4,
dy=4,
percentile=90,
use_totband=True,
use_weighted_mean=True,
variable_edge_speed=False,
verbose=False):
    '''Get an estimate of the average speed and travel time of each edge
    in the graph from the mask and conversion dictionary.
    For each edge, get the geometry in pixel coords;
    for each point, get the nearest neighbors in the mask and infer
    the local speed.'''
mph_to_mps = 0.44704 # miles per hour to meters per second
for i,(u, v, edge_data) in enumerate(G_.edges(data=True)):
if verbose: #(i % 100) == 0:
logger.info("\n" + str(i) + " " + str(u) + " " + str(v) + " " \
+ str(edge_data))
if (i % 1000) == 0:
logger.info(str(i) + " / " + str(len(G_.edges())) + " edges")
tot_hours, mean_speed_mph, length_miles = \
get_edge_time_properties(mask, edge_data, conv_dict,
min_z=min_z, dx=dx, dy=dy,
percentile=percentile,
use_totband=use_totband,
use_weighted_mean=use_weighted_mean,
variable_edge_speed=variable_edge_speed,
verbose=verbose)
# update edges
edge_data['Travel Time (h)'] = tot_hours
edge_data['inferred_speed_mph'] = np.round(mean_speed_mph, 2)
edge_data['length_miles'] = length_miles
edge_data['inferred_speed_mps'] = np.round(mean_speed_mph * mph_to_mps, 2)
edge_data['travel_time_s'] = np.round(3600. * tot_hours, 3)
# edge_data['travel_time'] = np.round(3600. * tot_hours, 3)
    return G_
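# The unit bookkeeping above in one worked example: a 0.5-mile edge at an
# inferred 25 mph costs 0.02 hours, i.e. 72 seconds of travel time.
length_miles, mean_speed_mph = 0.5, 25.0
tot_hours = length_miles / mean_speed_mph
print(round(3600. * tot_hours, 3))          # 72.0  -> 'travel_time_s'
print(round(mean_speed_mph * 0.44704, 2))   # 11.18 -> 'inferred_speed_mps'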
Python | def add_travel_time_dir(graph_dir, mask_dir, conv_dict, graph_dir_out,
min_z=128, dx=4, dy=4, percentile=90,
use_totband=True, use_weighted_mean=True,
variable_edge_speed=False, mask_prefix='',
save_shapefiles=True,
verbose=False):
'''Update graph properties to include travel time for entire directory'''
    pickle_protocol = 4  # python 2.7 can't read protocol 4
logger.info("Updating graph properties to include travel time")
logger.info(" Writing to: " + str(graph_dir_out))
os.makedirs(graph_dir_out, exist_ok=True)
image_names = sorted([z for z in os.listdir(mask_dir) if z.endswith('.tif')])
for i,image_name in enumerate(image_names):
im_root = image_name.split('.')[0]
if len(mask_prefix) > 0:
im_root = im_root.split(mask_prefix)[-1]
out_file = os.path.join(graph_dir_out, im_root + '.gpickle')
if (i % 1) == 0:
logger.info("\n" + str(i+1) + " / " + str(len(image_names)) + " " + image_name + " " + im_root)
mask_path = os.path.join(mask_dir, image_name)
graph_path = os.path.join(graph_dir, im_root + '.gpickle')
        if not os.path.exists(graph_path):
            # log with a single formatted message (logger.info does not
            # accept print-style positional args), and skip rather than abort
            logger.info("  " + str(i) + " DNE, skipping: " + str(graph_path))
            continue
mask = skimage.io.imread(mask_path)
G_raw = nx.read_gpickle(graph_path)
# see if it's empty
if len(G_raw.nodes()) == 0:
nx.write_gpickle(G_raw, out_file, protocol=pickle_protocol)
continue
G = infer_travel_time(G_raw, mask, conv_dict,
min_z=min_z, dx=dx, dy=dy,
percentile=percentile,
use_totband=use_totband,
use_weighted_mean=use_weighted_mean,
variable_edge_speed=variable_edge_speed,
verbose=verbose)
G = G.to_undirected()
nx.write_gpickle(G, out_file, protocol=pickle_protocol)
    return
Python | def infer_speed(conf):
'''See _arr_slicing_speed.ipynb for better tests'''
t0 = time.time()
percentile = 85
dx, dy = 4, 4 # nearest neighbors patch size
min_z = 128 # min z value to consider a hit
#width_key, width_mult = 4, 1 # constant width
if conf.num_classes == 8:
use_totband = True
else:
use_totband = False
save_shapefiles = False
use_weighted_mean = True
variable_edge_speed = False
verbose = False
# output pkl
graph_dir = "{}/working/sp5r2/models/graphs/{}".format(
"/wdata", conf.modelname)
Path(graph_dir).mkdir(parents=True, exist_ok=True)
preds_dirname = conf.modelname.replace('_th06', '')
merge_dir = (
"/wdata" + "/working/sp5r2/models/preds/" +
preds_dirname + "/merged_test")
mask_dir = merge_dir
mask_prefix = ''
if conf.num_folds == 1:
folds_dir = (
"/wdata" + "/working/sp5r2/models/preds/" +
preds_dirname + "/fold0_test")
mask_dir = folds_dir
mask_prefix = 'fold0_'
# output pkl
graph_speed_dir = "{}/working/sp5r2/models/graphs_speed/{}".format(
"/wdata", conf.modelname)
Path(graph_speed_dir).mkdir(parents=True, exist_ok=True)
logger.info("graph_speed_dir: " + graph_speed_dir)
# speed conversion dataframes (see _speed_data_prep.ipynb)
speed_conversion_file_binned = os.path.join(
"/wdata" + '/input/train/masks_base/',
'roads_train_speed_conversion_binned.csv',
)
# load conversion file
    # get the conversion dictionary between pixel mask values and road speed (mph)
assert conf.num_classes > 1
conv_df, conv_dict \
= load_speed_conversion_dict_binned(speed_conversion_file_binned)
logger.info("speed conv_dict: " + str(conv_dict))
# Add travel time to entire dir
add_travel_time_dir(graph_dir,
mask_dir,
conv_dict,
graph_speed_dir,
min_z=min_z,
dx=dx, dy=dy,
percentile=percentile,
use_totband=use_totband,
use_weighted_mean=use_weighted_mean,
variable_edge_speed=variable_edge_speed,
mask_prefix=mask_prefix,
save_shapefiles=save_shapefiles,
verbose=verbose)
t1 = time.time()
logger.info("Time to execute add_travel_time_dir(): {x} seconds".format(x=t1-t0)) | def infer_speed(conf):
'''See _arr_slicing_speed.ipynb for better tests'''
t0 = time.time()
percentile = 85
dx, dy = 4, 4 # nearest neighbors patch size
min_z = 128 # min z value to consider a hit
#width_key, width_mult = 4, 1 # constant width
if conf.num_classes == 8:
use_totband = True
else:
use_totband = False
save_shapefiles = False
use_weighted_mean = True
variable_edge_speed = False
verbose = False
# output pkl
graph_dir = "{}/working/sp5r2/models/graphs/{}".format(
"/wdata", conf.modelname)
Path(graph_dir).mkdir(parents=True, exist_ok=True)
preds_dirname = conf.modelname.replace('_th06', '')
merge_dir = (
"/wdata" + "/working/sp5r2/models/preds/" +
preds_dirname + "/merged_test")
mask_dir = merge_dir
mask_prefix = ''
if conf.num_folds == 1:
folds_dir = (
"/wdata" + "/working/sp5r2/models/preds/" +
preds_dirname + "/fold0_test")
mask_dir = folds_dir
mask_prefix = 'fold0_'
# output pkl
graph_speed_dir = "{}/working/sp5r2/models/graphs_speed/{}".format(
"/wdata", conf.modelname)
Path(graph_speed_dir).mkdir(parents=True, exist_ok=True)
logger.info("graph_speed_dir: " + graph_speed_dir)
# speed conversion dataframes (see _speed_data_prep.ipynb)
speed_conversion_file_binned = os.path.join(
"/wdata" + '/input/train/masks_base/',
'roads_train_speed_conversion_binned.csv',
)
# load conversion file
# get the conversion diction between pixel mask values and road speed (mph)
assert conf.num_classes > 1
conv_df, conv_dict \
= load_speed_conversion_dict_binned(speed_conversion_file_binned)
logger.info("speed conv_dict: " + str(conv_dict))
# Add travel time to entire dir
add_travel_time_dir(graph_dir,
mask_dir,
conv_dict,
graph_speed_dir,
min_z=min_z,
dx=dx, dy=dy,
percentile=percentile,
use_totband=use_totband,
use_weighted_mean=use_weighted_mean,
variable_edge_speed=variable_edge_speed,
mask_prefix=mask_prefix,
save_shapefiles=save_shapefiles,
verbose=verbose)
t1 = time.time()
logger.info("Time to execute add_travel_time_dir(): {x} seconds".format(x=t1-t0)) |
Python | def clean_sub_graphs(G_, min_length=150, max_nodes_to_skip=30,
weight='length_pix', verbose=True,
super_verbose=False):
    '''Remove subgraphs whose max path length is less than min_length;
    if the subgraph has more than max_nodes_to_skip nodes, don't check length
    (this step greatly improves processing time)'''
if len(list(G_.nodes())) == 0:
return G_
if verbose:
print ("Running clean_sub_graphs...")
    # nx.connected_component_subgraphs was removed in networkx 2.4; build the
    # subgraphs from connected_components instead
    sub_graphs = [G_.subgraph(c).copy() for c in nx.connected_components(G_)]
bad_nodes = []
if verbose:
print (" len(G_.nodes()):", len(list(G_.nodes())) )
print (" len(G_.edges()):", len(list(G_.edges())) )
if super_verbose:
print ("G_.nodes:", G_.nodes())
        edge_tmp = list(G_.edges())[np.random.randint(len(G_.edges()))]
        print (edge_tmp, "G.edge props:", G_.edges[edge_tmp[0], edge_tmp[1]])
for G_sub in sub_graphs:
# don't check length if too many nodes in subgraph
if len(G_sub.nodes()) > max_nodes_to_skip:
continue
else:
all_lengths = dict(nx.all_pairs_dijkstra_path_length(G_sub, weight=weight))
if super_verbose:
print (" \nGs.nodes:", G_sub.nodes() )
print (" all_lengths:", all_lengths )
            # get all lengths
lens = []
#for u,v in all_lengths.iteritems():
for u in all_lengths.keys():
v = all_lengths[u]
#for uprime, vprime in v.iteritems():
for uprime in v.keys():
vprime = v[uprime]
lens.append(vprime)
if super_verbose:
print (" u, v", u,v )
print (" uprime, vprime:", uprime, vprime )
max_len = np.max(lens)
if super_verbose:
print (" Max length of path:", max_len)
if max_len < min_length:
bad_nodes.extend(G_sub.nodes())
if super_verbose:
print (" appending to bad_nodes:", G_sub.nodes())
# remove bad_nodes
G_.remove_nodes_from(bad_nodes)
if verbose:
print (" num bad_nodes:", len(bad_nodes))
#print ("bad_nodes:", bad_nodes)
print (" len(G'.nodes()):", len(G_.nodes()))
print (" len(G'.edges()):", len(G_.edges()))
if super_verbose:
print (" G_.nodes:", G_.nodes())
    return G_
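# Tiny demo of clean_sub_graphs on a graph with one long and one short
# component; the short component (max path length < min_length) is dropped:
import networkx as nx

G = nx.Graph()
G.add_edge(0, 1, length_pix=200)   # long component, kept
G.add_edge(2, 3, length_pix=50)    # short component, removed
G2 = clean_sub_graphs(G, min_length=150, weight='length_pix', verbose=False)
print(sorted(G2.nodes()))          # [0, 1]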
Python | def remove_short_edges(G_, min_spur_length_m=100, verbose=False):
"""Remove unconnected edges shorter than the desired length"""
if verbose:
print("Remove shoert edges")
deg_list = list(G_.degree)
# iterate through list
bad_nodes = []
for i, (n, deg) in enumerate(deg_list):
# if verbose and (i % 500) == 0:
# print(n, deg)
# check if node has only one neighbor
if deg == 1:
# get edge
edge = list(G_.edges(n))
u, v = edge[0]
# get edge length
edge_props = G_.get_edge_data(u, v, 0)
length = edge_props['length']
# edge_props = G_.edges([u, v])
if length < min_spur_length_m:
bad_nodes.append(n)
if verbose:
print(i, "/", len(list(G_.nodes())),
"n, deg, u, v, length:", n, deg, u, v, length)
if verbose:
print("bad_nodes:", bad_nodes)
G_.remove_nodes_from(bad_nodes)
if verbose:
print("num remaining nodes:", len(list(G_.nodes())))
    return G_
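A quick check of the spur removal above (assumes networkx; a MultiGraph is used because the function reads edge key 0):

import networkx as nx

G = nx.MultiGraph()
G.add_edge(0, 1, length=500.0)
G.add_edge(1, 2, length=500.0)
G.add_edge(2, 3, length=20.0)   # short spur ending at degree-1 node 3
G = remove_short_edges(G, min_spur_length_m=100)
print(sorted(G.nodes()))        # [0, 1, 2] -- node 3 removed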
Python
def wkt_list_to_nodes_edges(wkt_list):
'''Convert wkt list to nodes and edges
Make an edge between each node in linestring. Since one linestring
may contain multiple edges, this is the safest approach'''
node_loc_set = set() # set of edge locations
node_loc_dic = {} # key = node idx, val = location
node_loc_dic_rev = {} # key = location, val = node idx
edge_loc_set = set() # set of edge locations
edge_dic = {} # edge properties
node_iter = 0
edge_iter = 0
for i,lstring in enumerate(wkt_list):
# get lstring properties
shape = shapely.wkt.loads(lstring)
xs, ys = shape.coords.xy
length_orig = shape.length
# iterate through coords in line to create edges between every point
for j,(x,y) in enumerate(zip(xs, ys)):
loc = (x,y)
# for first item just make node, not edge
if j == 0:
# if not yet seen, create new node
if loc not in node_loc_set:
node_loc_set.add(loc)
node_loc_dic[node_iter] = loc
node_loc_dic_rev[loc] = node_iter
node = node_iter
node_iter += 1
# if not first node in edge, retrieve previous node and build edge
else:
prev_loc = (xs[j-1], ys[j-1])
#print ("prev_loc:", prev_loc)
prev_node = node_loc_dic_rev[prev_loc]
# if new, create new node
if loc not in node_loc_set:
node_loc_set.add(loc)
node_loc_dic[node_iter] = loc
node_loc_dic_rev[loc] = node_iter
node = node_iter
node_iter += 1
# if seen before, retrieve node properties
else:
node = node_loc_dic_rev[loc]
# add edge, which is start_node to end_node
edge_loc = (loc, prev_loc)
edge_loc_rev = (prev_loc, loc)
# shouldn't be duplicate edges, so break if we see one
if (edge_loc in edge_loc_set) or (edge_loc_rev in edge_loc_set):
print ("Oops, edge already seen, returning:", edge_loc)
return
# get distance to prev_loc and current loc
proj_prev = shape.project(Point(prev_loc))
proj = shape.project(Point(loc))
                # edge length is the difference of the two projected lengths
# along the linestring
edge_length = abs(proj - proj_prev)
# make linestring
line_out = LineString([prev_loc, loc])
line_out_wkt = line_out.wkt
edge_props = {'start': prev_node,
'start_loc_pix': prev_loc,
'end': node,
'end_loc_pix': loc,
'length_pix': edge_length,
'wkt_pix': line_out_wkt,
'geometry_pix': line_out,
'osmid': i}
#print ("edge_props", edge_props)
edge_loc_set.add(edge_loc)
edge_dic[edge_iter] = edge_props
edge_iter += 1
    return node_loc_dic, edge_dic
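A usage sketch for the WKT parser above (assumes shapely, which the function already requires):

wkts = ['LINESTRING (0 0, 10 0, 10 10)']
node_loc_dic, edge_dic = wkt_list_to_nodes_edges(wkts)
print(node_loc_dic)               # {0: (0.0, 0.0), 1: (10.0, 0.0), 2: (10.0, 10.0)}
print(edge_dic[0]['length_pix'])  # 10.0, measured along the linestring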
Python
def wkt_list_to_nodes_edges_sloppy(wkt_list):
'''Convert wkt list to nodes and edges
Assumes each linestring corresponds to a unique edge
Since this is not always the case, this function fails if a linestring
contains multiple edges'''
node_loc_set = set() # set of edge locations
node_loc_dic = {} # key = node idx, val = location
node_loc_dic_rev = {} # key = location, val = node idx
edge_dic = {} # edge properties
node_iter = 0
edge_iter = 0
for lstring in wkt_list:
# get lstring properties
shape = shapely.wkt.loads(lstring)
x, y = shape.coords.xy
length = shape.length
# set start node
start_loc = (x[0], y[0])
# if new, create new node
if start_loc not in node_loc_set:
node_loc_set.add(start_loc)
node_loc_dic[node_iter] = start_loc
node_loc_dic_rev[start_loc] = node_iter
start_node = node_iter
node_iter += 1
# if seen before, retrieve node properties
else:
start_node = node_loc_dic_rev[start_loc]
# set end node (just like start node)
end_loc = (x[-1], y[-1])
# if new, create new node
if end_loc not in node_loc_set:
node_loc_set.add(end_loc)
node_loc_dic[node_iter] = end_loc
node_loc_dic_rev[end_loc] = node_iter
end_node = node_iter
node_iter += 1
# if seen before, retrieve node properties
else:
end_node = node_loc_dic_rev[end_loc]
# add edge, which is start_node to end_node
edge_props = {'start': start_node,
'start_loc_pix': start_loc,
'end': end_node,
'end_loc_pix': end_loc,
'length_pix': length,
'wkt_pix': lstring,
'geometry_pix': shape}
edge_dic[edge_iter] = edge_props
edge_iter += 1
    return node_loc_dic, edge_dic
Python
def convert_pix_lstring_to_geo(wkt_lstring,
im_file,
utm_zone=None,
utm_letter=None,
verbose=False):
'''Convert linestring in pixel coords to geo coords
    If the zone or letter changes in the middle of the line the coordinates
    become inconsistent, so force the zone and letter based on the first
    point (latitude, longitude, force_zone_number=None, force_zone_letter=None),
    or just force the utm zone and letter explicitly
'''
shape = wkt_lstring #shapely.wkt.loads(lstring)
x_pixs, y_pixs = shape.coords.xy
coords_latlon = []
coords_utm = []
for i,(x,y) in enumerate(zip(x_pixs, y_pixs)):
targetSR = osr.SpatialReference()
targetSR.ImportFromEPSG(4326)
lon, lat = pixelToGeoCoord(x, y, im_file, targetSR=targetSR)
if utm_zone and utm_letter:
[utm_east, utm_north, _, _] = utm.from_latlon(lat, lon,
force_zone_number=utm_zone, force_zone_letter=utm_letter)
else:
[utm_east, utm_north, utm_zone, utm_letter] = utm.from_latlon(lat, lon)
if verbose:
print("lat lon, utm_east, utm_north, utm_zone, utm_letter]",
[lat, lon, utm_east, utm_north, utm_zone, utm_letter])
coords_utm.append([utm_east, utm_north])
coords_latlon.append([lon, lat])
lstring_latlon = LineString([Point(z) for z in coords_latlon])
lstring_utm = LineString([Point(z) for z in coords_utm])
    return lstring_latlon, lstring_utm, utm_zone, utm_letter
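A small sketch of the UTM-zone forcing the function above relies on (assumes the utm package; coordinates are illustrative):

import utm

lat, lon = 36.11, -115.17  # somewhere in zone 11S
east, north, zone, letter = utm.from_latlon(lat, lon)
# keep later points in the zone of the first point, even across a boundary
east2, north2, _, _ = utm.from_latlon(36.12, -115.18,
                                      force_zone_number=zone,
                                      force_zone_letter=letter)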
Python
def create_optimizer(optimizer_config, model, master_params=None):
"""Creates optimizer and schedule from configuration
Parameters
----------
optimizer_config : dict
Dictionary containing the configuration options for the optimizer.
model : Model
The network model.
Returns
-------
optimizer : Optimizer
The optimizer.
scheduler : LRScheduler
The learning rate scheduler.
"""
if optimizer_config["classifier_lr"] != -1:
# Separate classifier parameters from all others
net_params = []
classifier_params = []
for k, v in model.named_parameters():
if not v.requires_grad:
continue
if k.find("encoder") != -1:
net_params.append(v)
else:
classifier_params.append(v)
params = [
{"params": net_params},
{"params": classifier_params, "lr": optimizer_config["classifier_lr"]},
]
else:
if master_params:
params = master_params
else:
params = model.parameters()
if optimizer_config["type"] == "SGD":
optimizer = optim.SGD(params,
lr=optimizer_config["learning_rate"],
momentum=optimizer_config["momentum"],
weight_decay=optimizer_config["weight_decay"],
nesterov=optimizer_config["nesterov"])
elif optimizer_config["type"] == "Adam":
optimizer = optim.Adam(params,
lr=optimizer_config["learning_rate"],
weight_decay=optimizer_config["weight_decay"])
elif optimizer_config["type"] == "AdamW":
optimizer = optim.Adam(params,
lr=optimizer_config["learning_rate"],
weight_decay=optimizer_config["weight_decay"])
elif optimizer_config["type"] == "RAdam":
optimizer = RAdam(params,
lr=optimizer_config["learning_rate"],
weight_decay=optimizer_config["weight_decay"])
elif optimizer_config["type"] == "RmsProp":
optimizer = optim.Adam(params,
lr=optimizer_config["learning_rate"],
weight_decay=optimizer_config["weight_decay"])
else:
raise KeyError("unrecognized optimizer {}".format(optimizer_config["type"]))
if optimizer_config["schedule"]["type"] == "step":
scheduler = LRStepScheduler(optimizer, **optimizer_config["schedule"]["params"])
elif optimizer_config["schedule"]["type"] == "multistep":
scheduler = MultiStepLR(optimizer, **optimizer_config["schedule"]["params"])
elif optimizer_config["schedule"]["type"] == "exponential":
scheduler = ExponentialLRScheduler(optimizer, **optimizer_config["schedule"]["params"])
elif optimizer_config["schedule"]["type"] == "poly":
scheduler = PolyLR(optimizer, **optimizer_config["schedule"]["params"])
elif optimizer_config["schedule"]["type"] == "constant":
scheduler = lr_scheduler.LambdaLR(optimizer, lambda epoch: 1.0)
elif optimizer_config["schedule"]["type"] == "linear":
def linear_lr(it):
return it * optimizer_config["schedule"]["params"]["alpha"] + optimizer_config["schedule"]["params"]["beta"]
scheduler = lr_scheduler.LambdaLR(optimizer, linear_lr)
    return optimizer, scheduler
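A minimal configuration sketch for create_optimizer (assumes PyTorch and the module-level imports used above; the model and hyperparameters are illustrative):

import torch.nn as nn

model = nn.Linear(8, 2)
config = {
    "classifier_lr": -1,  # -1 -> one learning rate for all parameters
    "type": "SGD",
    "learning_rate": 0.01,
    "momentum": 0.9,
    "weight_decay": 1e-4,
    "nesterov": True,
    "schedule": {"type": "multistep",
                 "params": {"milestones": [10, 20], "gamma": 0.1}},
}
optimizer, scheduler = create_optimizer(config, model)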
Python
def iou_binary(preds, labels, EMPTY=1.0, ignore=None, per_image=True):
"""
IoU for foreground class
binary: 1 foreground, 0 background
"""
if not per_image:
preds, labels = (preds,), (labels,)
ious = []
for pred, label in zip(preds, labels):
intersection = ((label == 1) & (pred == 1)).sum()
union = ((label == 1) | ((pred == 1) & (label != ignore))).sum()
if not union:
iou = EMPTY
else:
iou = float(intersection) / union
ious.append(iou)
    iou = mean(ious)  # mean across images if per_image
    return 100 * iou
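A worked example for iou_binary (assumes numpy and the module's mean helper from the Lovász-hinge utilities; each row is one image when per_image=True):

import numpy as np

pred = np.array([[1, 1, 0, 0]])
label = np.array([[1, 0, 0, 0]])
# intersection = 1, union = 2, so the result is 50.0
print(iou_binary(pred, label, per_image=True))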
Python
def iou(preds, labels, C, EMPTY=1.0, ignore=None, per_image=False):
"""
Array of IoU for each (non ignored) class
"""
if not per_image:
preds, labels = (preds,), (labels,)
ious = []
for pred, label in zip(preds, labels):
iou = []
for i in range(C):
if (i != ignore): # The ignored label is sometimes among predicted classes (ENet - CityScapes)
intersection = ((label == i) & (pred == i)).sum()
union = ((label == i) | ((pred == i) & (label != ignore))).sum()
if not union:
iou.append(EMPTY)
else:
iou.append(float(intersection) / union)
ious.append(iou)
    ious = list(map(mean, zip(*ious)))  # mean across images if per_image (list() needed under Python 3)
    return 100 * np.array(ious)
Python
def convert_array_to_multichannel(in_arr, n_channels=7, burnValue=255,
append_total_band=False, verbose=False):
'''Take input array with multiple values, and make each value a unique
channel. Assume a zero value is background, while value of 1 is the
first channel, 2 the second channel, etc.'''
h, w = in_arr.shape[:2]
# scikit image wants it in this format by default
out_arr = np.zeros((n_channels, h, w), dtype=np.uint8)
# out_arr = np.zeros((h,w,n_channels), dtype=np.uint8)
for band in range(n_channels):
val = band + 1
band_out = np.zeros((h, w), dtype=np.uint8)
if verbose:
print("band:", band)
band_arr_bool = np.where(in_arr == val)
band_out[band_arr_bool] = burnValue
out_arr[band, :, :] = band_out
# out_arr[:,:,band] = band_out
if append_total_band:
tot_band = np.zeros((h, w), dtype=np.uint8)
band_arr_bool = np.where(in_arr > 0)
tot_band[band_arr_bool] = burnValue
tot_band = tot_band.reshape(1, h, w)
out_arr = np.concatenate((out_arr, tot_band), axis=0).astype(np.uint8)
if verbose:
print("out_arr.shape:", out_arr.shape)
    return out_arr
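A small check of the channel expansion above (assumes numpy):

import numpy as np

in_arr = np.array([[0, 1],
                   [2, 2]], dtype=np.uint8)
out = convert_array_to_multichannel(in_arr, n_channels=2,
                                    append_total_band=True, verbose=False)
print(out.shape)  # (3, 2, 2): one band per value, plus the total band
print(out[0])     # value-1 pixels burned to 255, all else 0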
Python
def speed_mask_dir(geojson_dir, image_dir, output_dir,
speed_to_burn_func,
mask_burn_val_key='burnValue',
buffer_distance_meters=2,
buffer_roundness=1,
dissolve_by='speed_m/s',
bin_conversion_key='speed_mph',
verbose=True,
# below here is all variables for binned speed
output_dir_multidim='',
channel_value_mult=1,
n_channels=8,
channel_burnValue=255,
append_total_band=True,
):
"""Create continuous speed masks for entire dir"""
images = sorted([z for z in os.listdir(image_dir) if z.endswith('.tif')])
for j, image_name in enumerate(images):
image_root = image_name.split('.')[0]
# image_root = image_name.split('RGB-PanSharpen_')[-1].split('.')[0]
image_path = os.path.join(image_dir, image_name)
mask_path_out = os.path.join(output_dir, image_name)
# Get geojson path
# SpaceNet chips
geojson_path = os.path.join(
geojson_dir, image_root.replace('PS-RGB', 'geojson_roads_speed').replace('PS-MS', 'geojson_roads_speed')
# geojson_dir, image_root.replace('PS-RGB', 'geojson_roads_speed')
+ '.geojson')
# # Contiguous files
# geojson_path = os.path.join(geojson_dir, image_root + '.geojson')
# if (j % 100) == 0:
if (j % 1) == 0:
print(j + 1, "/", len(images), "image:", image_name,
"geojson:", geojson_path)
if j > 0:
verbose = False
gdf_buffer = road_speed.create_speed_gdf(
image_path, geojson_path, mask_path_out, speed_to_burn_func,
mask_burn_val_key=mask_burn_val_key,
buffer_distance_meters=buffer_distance_meters,
buffer_roundness=buffer_roundness,
dissolve_by=dissolve_by,
bin_conversion_key=bin_conversion_key,
verbose=verbose)
# If Binning...
if output_dir_multidim:
mask_path_out_md = os.path.join(output_dir_multidim, image_name)
# Convert array to a multi-channel image
mask_bins = skimage.io.imread(mask_path_out)
mask_bins = (mask_bins / channel_value_mult).astype(int)
if verbose:
print("mask_bins.shape:", mask_bins.shape)
print("np unique mask_bins:", np.unique(mask_bins))
# print ("mask_bins:", mask_bins)
# define mask_channels
if np.max(mask_bins) == 0:
h, w = skimage.io.imread(mask_path_out).shape[:2]
# h, w = cv2.imread(mask_path_out, 0).shape[:2]
if append_total_band:
mask_channels = np.zeros((n_channels + 1, h, w)).astype(np.uint8)
else:
mask_channels = np.zeros((n_channels, h, w)).astype(np.uint8)
else:
mask_channels = convert_array_to_multichannel(
mask_bins, n_channels=n_channels,
burnValue=channel_burnValue,
append_total_band=append_total_band,
verbose=verbose)
if verbose:
print("mask_channels.shape:", mask_channels.shape)
print("mask_channels.dtype:", mask_channels.dtype)
# write to file
# skimage version...
# skimage.io.imsave(mask_path_out_md, mask_channels, compress=1) # , plugin='tifffile')
# gdal version
            CreateMultiBandGeoTiff(mask_path_out_md, mask_channels)
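A hedged call sketch for speed_mask_dir (all paths are hypothetical placeholders, and the conversion function is a toy stand-in, not the pipeline's own):

def speed_to_burn(speed_mph):
    # toy continuous conversion: 0-65 mph -> 0-255 burn value
    return min(255.0 * speed_mph / 65.0, 255.0)

speed_mask_dir('/data/geojson_roads_speed',  # hypothetical input dirs
               '/data/PS-RGB',
               '/data/masks_contin',
               speed_to_burn,
               output_dir_multidim='')       # empty string skips binned masks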
Python
def speed_to_burn_func(speed):
'''Convert speed estimate to mask burn value between
0 and mask_max'''
bw = mask_max - min_road_burn_val
burn_val = min(
min_road_burn_val + bw * ((speed - min_speed_contin) / (max_speed_contin - min_speed_contin)), mask_max)
    return max(burn_val, min_road_burn_val)
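A worked check of the continuous scaling above (the module globals are assumptions; typical SpaceNet-style settings shown):

mask_max, min_road_burn_val = 255, 0
min_speed_contin, max_speed_contin = 0, 65
# a 32.5 mph road sits halfway through the range, so
# burn_val = 0 + 255 * (32.5 - 0) / (65 - 0) = 127.5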
Python
def speed_to_burn_func(speed_mph):
'''bin every 10 mph or so
Convert speed estimate to appropriate channel
bin = 0 if speed = 0'''
bins = [10, 15, 18.75, 20, 25, 30, 35, 45, 55, 65]
return (1 + bisect_left(bins, speed_mph)) * channel_value_mult
    # determine num_channels
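A worked check of the binning above (assumes channel_value_mult = 1):

from bisect import bisect_left

bins = [10, 15, 18.75, 20, 25, 30, 35, 45, 55, 65]
for mph in (5, 20, 70):
    print(mph, 1 + bisect_left(bins, mph))  # 5 -> 1, 20 -> 4, 70 -> 11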
Python
def pkl_dir_to_wkt(pkl_dir, output_csv_path='',
weight_keys=['length', 'travel_time_s'],
verbose=False):
"""
Create submission wkt from directory full of graph pickles
"""
wkt_list = []
pkl_list = sorted([z for z in os.listdir(pkl_dir) if z.endswith('.gpickle')])
for i, pkl_name in enumerate(pkl_list):
G = nx.read_gpickle(os.path.join(pkl_dir, pkl_name))
# ensure an undirected graph
print(i, "/", len(pkl_list), "num G.nodes:", len(G.nodes()))
name_root = pkl_name.replace('PS-RGB_', '').replace('PS-MS_', '').split('.')[0]
# AOI_root = 'AOI' + pkl_name.split('AOI')[-1]
# name_root = AOI_root.split('.')[0].replace('PS-RGB_', '')
print("name_root:", name_root)
# if empty, still add to submission
if len(G.nodes()) == 0:
wkt_item_root = [name_root, 'LINESTRING EMPTY']
if len(weight_keys) > 0:
weights = [0 for w in weight_keys]
wkt_list.append(wkt_item_root + weights)
else:
wkt_list.append(wkt_item_root)
# extract geometry pix wkt, save to list
seen_edges = set([])
for i, (u, v, attr_dict) in enumerate(G.edges(data=True)):
# make sure we haven't already seen this edge
if (u, v) in seen_edges or (v, u) in seen_edges:
print(u, v, "already catalogued!")
continue
else:
seen_edges.add((u, v))
seen_edges.add((v, u))
geom_pix_wkt = attr_dict['geometry_pix'].wkt
            # check edge length
if attr_dict['length'] > 5000:
print("Edge too long!, u,v,data:", u,v,attr_dict)
return
if verbose:
print(i, "/", len(G.edges()), "u, v:", u, v)
print(" attr_dict:", attr_dict)
print(" geom_pix_wkt:", geom_pix_wkt)
wkt_item_root = [name_root, geom_pix_wkt]
if len(weight_keys) > 0:
weights = [attr_dict[w] for w in weight_keys]
if verbose:
print(" weights:", weights)
wkt_list.append(wkt_item_root + weights)
else:
wkt_list.append(wkt_item_root)
if verbose:
print("wkt_list:", wkt_list)
# create dataframe
if len(weight_keys) > 0:
cols = ['ImageId', 'WKT_Pix'] + weight_keys
else:
cols = ['ImageId', 'WKT_Pix']
# use 'length_m' and 'travel_time_s' instead?
cols_new = []
for z in cols:
if z == 'length':
cols_new.append('length_m')
elif z == 'travel_time':
cols_new.append('travel_time_s')
else:
cols_new.append(z)
cols = cols_new
# cols = [z.replace('length', 'length_m') for z in cols]
# cols = [z.replace('travel_time', 'travel_time_s') for z in cols]
print("cols:", cols)
df = pd.DataFrame(wkt_list, columns=cols)
print("df:", df)
# save
if len(output_csv_path) > 0:
df.to_csv(output_csv_path, index=False)
    return df
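A hedged call sketch for pkl_dir_to_wkt (paths are placeholders; assumes networkx < 3.0, where nx.read_gpickle still exists, and graphs whose edges carry 'geometry_pix', 'length', and 'travel_time_s'):

df = pkl_dir_to_wkt('/results/graphs',
                    output_csv_path='/results/solution.csv',
                    weight_keys=['length', 'travel_time_s'])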
Python
def clean_sub_graphs(G_, min_length=150, max_nodes_to_skip=30,
weight='length_pix', verbose=False,
super_verbose=False):
    '''Remove subgraphs with a max path length less than min_length,
    if the subgraph has more than max_nodes_to_skip nodes, don't check length
    (this step greatly improves processing time)'''
if len(list(G_.nodes())) == 0:
return G_
print("Running clean_sub_graphs...")
sub_graphs = list(nx.connected_component_subgraphs(G_))
bad_nodes = []
if verbose:
print(" len(G_.nodes()):", len(list(G_.nodes())))
print(" len(G_.edges()):", len(list(G_.edges())))
if super_verbose:
print("G_.nodes:", G_.nodes())
        edge_tmp = list(G_.edges())[np.random.randint(len(G_.edges()))]
        print(edge_tmp, "G.edge props:", G_.get_edge_data(edge_tmp[0], edge_tmp[1]))
for G_sub in sub_graphs:
# don't check length if too many nodes in subgraph
if len(G_sub.nodes()) > max_nodes_to_skip:
continue
else:
all_lengths = dict(nx.all_pairs_dijkstra_path_length(G_sub, weight=weight))
if super_verbose:
print(" \nGs.nodes:", G_sub.nodes())
print(" all_lengths:", all_lengths)
            # get all lengths
lens = []
# for u,v in all_lengths.iteritems():
for u in all_lengths.keys():
v = all_lengths[u]
# for uprime, vprime in v.iteritems():
for uprime in v.keys():
vprime = v[uprime]
lens.append(vprime)
if super_verbose:
print(" u, v", u, v)
print(" uprime, vprime:", uprime, vprime)
max_len = np.max(lens)
if super_verbose:
print(" Max length of path:", max_len)
if max_len < min_length:
bad_nodes.extend(G_sub.nodes())
if super_verbose:
print(" appending to bad_nodes:", G_sub.nodes())
# remove bad_nodes
G_.remove_nodes_from(bad_nodes)
if verbose:
print(" num bad_nodes:", len(bad_nodes))
# print ("bad_nodes:", bad_nodes)
print(" len(G'.nodes()):", len(G_.nodes()))
print(" len(G'.edges()):", len(G_.edges()))
if super_verbose:
print(" G_.nodes:", G_.nodes())
    return G_
Python
def wkt_list_to_nodes_edges(wkt_list):
'''Convert wkt list to nodes and edges
Make an edge between each node in linestring. Since one linestring
may contain multiple edges, this is the safest approach'''
node_loc_set = set() # set of edge locations
node_loc_dic = {} # key = node idx, val = location
node_loc_dic_rev = {} # key = location, val = node idx
edge_loc_set = set() # set of edge locations
edge_dic = {} # edge properties
node_iter = 0
edge_iter = 0
for i, lstring in enumerate(wkt_list):
# get lstring properties
shape = shapely.wkt.loads(lstring)
xs, ys = shape.coords.xy
length_orig = shape.length
# iterate through coords in line to create edges between every point
for j, (x, y) in enumerate(zip(xs, ys)):
loc = (x, y)
# for first item just make node, not edge
if j == 0:
# if not yet seen, create new node
if loc not in node_loc_set:
node_loc_set.add(loc)
node_loc_dic[node_iter] = loc
node_loc_dic_rev[loc] = node_iter
node = node_iter
node_iter += 1
# if not first node in edge, retrieve previous node and build edge
else:
prev_loc = (xs[j - 1], ys[j - 1])
# print ("prev_loc:", prev_loc)
prev_node = node_loc_dic_rev[prev_loc]
# if new, create new node
if loc not in node_loc_set:
node_loc_set.add(loc)
node_loc_dic[node_iter] = loc
node_loc_dic_rev[loc] = node_iter
node = node_iter
node_iter += 1
# if seen before, retrieve node properties
else:
node = node_loc_dic_rev[loc]
# add edge, which is start_node to end_node
edge_loc = (loc, prev_loc)
edge_loc_rev = (prev_loc, loc)
                # shouldn't be duplicate edges, so skip if we see one
                if (edge_loc in edge_loc_set) or (edge_loc_rev in edge_loc_set):
                    print("Oops, edge already seen, skipping:", edge_loc)
                    continue
# get distance to prev_loc and current loc
proj_prev = shape.project(Point(prev_loc))
proj = shape.project(Point(loc))
                # edge length is the difference of the two projected lengths
# along the linestring
edge_length = abs(proj - proj_prev)
# make linestring
line_out = LineString([prev_loc, loc])
line_out_wkt = line_out.wkt
edge_props = {'start': prev_node,
'start_loc_pix': prev_loc,
'end': node,
'end_loc_pix': loc,
'length_pix': edge_length,
'wkt_pix': line_out_wkt,
'geometry_pix': line_out,
'osmid': i}
# print ("edge_props", edge_props)
edge_loc_set.add(edge_loc)
edge_dic[edge_iter] = edge_props
edge_iter += 1
    return node_loc_dic, edge_dic
Python
def clean_sub_graphs(G_,
min_length=150,
max_nodes_to_skip=30,
weight='length_pix',
verbose=True,
super_verbose=False):
    '''Remove subgraphs with a max path length less than min_length,
    if the subgraph has more than max_nodes_to_skip nodes, don't check length
    (this step greatly improves processing time)'''
if len(list(G_.nodes())) == 0:
return G_
if verbose:
print ("Running clean_sub_graphs...")
sub_graphs = list(nx.connected_component_subgraphs(G_))
bad_nodes = []
if verbose:
print (" len(G_.nodes()):", len(list(G_.nodes())) )
print (" len(G_.edges()):", len(list(G_.edges())) )
if super_verbose:
print ("G_.nodes:", G_.nodes())
        edge_tmp = list(G_.edges())[np.random.randint(len(G_.edges()))]
        print (edge_tmp, "G.edge props:", G_.get_edge_data(edge_tmp[0], edge_tmp[1]))
for G_sub in sub_graphs:
# don't check length if too many nodes in subgraph
if len(G_sub.nodes()) > max_nodes_to_skip:
continue
else:
all_lengths = dict(nx.all_pairs_dijkstra_path_length(G_sub, weight=weight))
if super_verbose:
print (" \nGs.nodes:", G_sub.nodes() )
print (" all_lengths:", all_lengths )
            # get all lengths
lens = []
#for u,v in all_lengths.iteritems():
for u in all_lengths.keys():
v = all_lengths[u]
#for uprime, vprime in v.iteritems():
for uprime in v.keys():
vprime = v[uprime]
lens.append(vprime)
if super_verbose:
print (" u, v", u,v )
print (" uprime, vprime:", uprime, vprime )
max_len = np.max(lens)
if super_verbose:
print (" Max length of path:", max_len)
if max_len < min_length:
bad_nodes.extend(G_sub.nodes())
if super_verbose:
print (" appending to bad_nodes:", G_sub.nodes())
# remove bad_nodes
G_.remove_nodes_from(bad_nodes)
if verbose:
print (" num bad_nodes:", len(bad_nodes))
#print ("bad_nodes:", bad_nodes)
print (" len(G'.nodes()):", len(G_.nodes()))
print (" len(G'.edges()):", len(G_.edges()))
if super_verbose:
print (" G_.nodes:", G_.nodes())
    return G_
Python
def convert_pix_lstring_to_geo(wkt_lstring, im_file):
'''Convert linestring in pixel coords to geo coords'''
shape = wkt_lstring #shapely.wkt.loads(lstring)
x_pixs, y_pixs = shape.coords.xy
coords_latlon = []
coords_utm = []
for (x,y) in zip (x_pixs, y_pixs):
lon, lat = pixelToGeoCoord(x, y, im_file)
[utm_east, utm_north, utm_zone, utm_letter] = utm.from_latlon(lat, lon)
coords_utm.append([utm_east, utm_north])
coords_latlon.append([lon, lat])
lstring_latlon = LineString([Point(z) for z in coords_latlon])
lstring_utm = LineString([Point(z) for z in coords_utm])
    return lstring_latlon, lstring_utm, utm_zone, utm_letter
Python
def convert_to_8Bit(inputRaster, outputRaster,
outputPixType='Byte',
outputFormat='GTiff',
rescale_type='rescale',
percentiles=[2, 98]):
'''
Convert 16bit image to 8bit
rescale_type = [clip, rescale]
if clip, scaling is done strictly between 0 65535
if rescale, each band is rescaled to a min and max
set by percentiles
'''
srcRaster = gdal.Open(inputRaster)
cmd = ['gdal_translate', '-ot', outputPixType, '-of',
outputFormat]
# iterate through bands
for bandId in range(srcRaster.RasterCount):
bandId = bandId + 1
band = srcRaster.GetRasterBand(bandId)
if rescale_type == 'rescale':
bmin = band.GetMinimum()
bmax = band.GetMaximum()
            # fall back to computed min/max if the metadata lacks them
            if bmin is None or bmax is None:
                (bmin, bmax) = band.ComputeRasterMinMax(1)
            # then override with the requested percentiles of the band values
band_arr_tmp = band.ReadAsArray()
bmin = np.percentile(band_arr_tmp.flatten(),
percentiles[0])
bmax = np.percentile(band_arr_tmp.flatten(),
percentiles[1])
else:
bmin, bmax = 0, 65535
cmd.append('-scale_{}'.format(bandId))
cmd.append('{}'.format(bmin))
cmd.append('{}'.format(bmax))
cmd.append('{}'.format(0))
cmd.append('{}'.format(255))
cmd.append(inputRaster)
cmd.append(outputRaster)
# print("Conversin command:", cmd)
subprocess.call(cmd)
    return
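A call sketch for the 8-bit conversion above (filenames are hypothetical; requires GDAL's gdal_translate on the PATH):

convert_to_8Bit('PS-RGB_16bit.tif', 'PS-RGB_8bit.tif',
                rescale_type='rescale', percentiles=[2, 98])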
Python
def graph_to_gdfs_pix(G, nodes=True, edges=True, node_geometry=True, fill_edge_geometry=True):
"""
Convert a graph into node and/or edge GeoDataFrames
Parameters
----------
G : networkx multidigraph
nodes : bool
if True, convert graph nodes to a GeoDataFrame and return it
edges : bool
if True, convert graph edges to a GeoDataFrame and return it
node_geometry : bool
if True, create a geometry column from node x and y data
fill_edge_geometry : bool
if True, fill in missing edge geometry fields using origin and
destination nodes
Returns
-------
GeoDataFrame or tuple
gdf_nodes or gdf_edges or both as a tuple
"""
if not (nodes or edges):
raise ValueError('You must request nodes or edges, or both.')
to_return = []
if nodes:
start_time = time.time()
nodes = {node: data for node, data in G.nodes(data=True)}
gdf_nodes = gpd.GeoDataFrame(nodes).T
if node_geometry:
# gdf_nodes['geometry'] = gdf_nodes.apply(lambda row: Point(row['x'], row['y']), axis=1)
gdf_nodes['geometry_pix'] = gdf_nodes.apply(lambda row: Point(row['x_pix'], row['y_pix']), axis=1)
gdf_nodes.crs = G.graph['crs']
gdf_nodes.gdf_name = '{}_nodes'.format(G.graph['name'])
gdf_nodes['osmid'] = gdf_nodes['osmid'].astype(np.int64).map(make_str)
to_return.append(gdf_nodes)
log('Created GeoDataFrame "{}" from graph in {:,.2f} seconds'.format(gdf_nodes.gdf_name,
time.time() - start_time))
if edges:
start_time = time.time()
# create a list to hold our edges, then loop through each edge in the
# graph
edges = []
for u, v, key, data in G.edges(keys=True, data=True):
# for each edge, add key and all attributes in data dict to the
# edge_details
edge_details = {'u': u, 'v': v, 'key': key}
for attr_key in data:
edge_details[attr_key] = data[attr_key]
# if edge doesn't already have a geometry attribute, create one now
# if fill_edge_geometry==True
if 'geometry_pix' not in data:
if fill_edge_geometry:
point_u = Point((G.nodes[u]['x_pix'], G.nodes[u]['y_pix']))
point_v = Point((G.nodes[v]['x_pix'], G.nodes[v]['y_pix']))
edge_details['geometry_pix'] = LineString([point_u, point_v])
else:
edge_details['geometry_pix'] = np.nan
edges.append(edge_details)
# create a GeoDataFrame from the list of edges and set the CRS
gdf_edges = gpd.GeoDataFrame(edges)
gdf_edges.crs = G.graph['crs']
gdf_edges.gdf_name = '{}_edges'.format(G.graph['name'])
to_return.append(gdf_edges)
log('Created GeoDataFrame "{}" from graph in {:,.2f} seconds'.format(gdf_edges.gdf_name,
time.time() - start_time))
if len(to_return) > 1:
return tuple(to_return)
else:
        return to_return[0]
Python
def plot_graph_pix(G, im=None, bbox=None, fig_height=6, fig_width=None, margin=0.02,
axis_off=True, equal_aspect=False, bgcolor='w', show=True,
save=False, close=True, file_format='png', filename='temp',
default_dpi=300, annotate=False, node_color='#66ccff', node_size=15,
node_alpha=1, node_edgecolor='none', node_zorder=1,
edge_color='#999999', edge_linewidth=1, edge_alpha=1,
edge_width_key='speed_mph',
edge_width_mult=1. / 25,
use_geom=True):
"""
Plot a networkx spatial graph.
Parameters
----------
G : networkx multidigraph
bbox : tuple
bounding box as north,south,east,west - if None will calculate from
spatial extents of data. if passing a bbox, you probably also want to
pass margin=0 to constrain it.
fig_height : int
matplotlib figure height in inches
fig_width : int
matplotlib figure width in inches
margin : float
relative margin around the figure
axis_off : bool
if True turn off the matplotlib axis
equal_aspect : bool
if True set the axis aspect ratio equal
bgcolor : string
the background color of the figure and axis
show : bool
if True, show the figure
save : bool
if True, save the figure as an image file to disk
close : bool
close the figure (only if show equals False) to prevent display
file_format : string
the format of the file to save (e.g., 'jpg', 'png', 'svg')
filename : string
the name of the file if saving
default_dpi : int
the resolution of the image file if saving (may get altered for
large images)
annotate : bool
if True, annotate the nodes in the figure
node_color : string
the color of the nodes
node_size : int
the size of the nodes
node_alpha : float
the opacity of the nodes
node_edgecolor : string
the color of the node's marker's border
node_zorder : int
zorder to plot nodes, edges are always 2, so make node_zorder 1 to plot
nodes beneath them or 3 to plot nodes atop them
edge_color : string
the color of the edges' lines
edge_linewidth : float
the width of the edges' lines
edge_alpha : float
the opacity of the edges' lines
edge_width_key : str
        optional: key in edge properties to determine edge width,
        supersedes edge_linewidth, defaults to "speed_mph"
    edge_width_mult : float
        factor to rescale width for plotting, defaults to 1./25, which gives
        a line width of 1 for a 25 mph speed limit.
use_geom : bool
if True, use the spatial geometry attribute of the edges to draw
geographically accurate edges, rather than just lines straight from node
to node
Returns
-------
fig, ax : tuple
"""
log('Begin plotting the graph...')
node_Xs = [float(x) for _, x in G.nodes(data='x_pix')]
node_Ys = [float(y) for _, y in G.nodes(data='y_pix')]
# node_Xs = [float(x) for _, x in G.nodes(data='x')]
# node_Ys = [float(y) for _, y in G.nodes(data='y')]
# get north, south, east, west values either from bbox parameter or from the
# spatial extent of the edges' geometries
if bbox is None:
edges = graph_to_gdfs_pix(G, nodes=False, fill_edge_geometry=True)
# print ("plot_graph_pix():, edges:", edges)
print("plot_graph_pix():, edges.columns:", edges.columns)
# print ("plot_graph_pix(): edges['geometry_pix']:", edges['geometry_pix'])
# print ("plot_graph_pix(): edges['geometry']:", edges['geometry'])
print("type edges['geometry_pix'].:", type(edges['geometry_pix']))
print("type gpd.GeoSeries(edges['geometry_pix']):", type(gpd.GeoSeries(edges['geometry_pix'])))
print("type gpd.GeoSeries(edges['geometry_pix'][0]):", type(gpd.GeoSeries(edges['geometry_pix']).iloc[0]))
west, south, east, north = gpd.GeoSeries(edges['geometry_pix']).total_bounds
# west, south, east, north = edges.total_bounds
else:
north, south, east, west = bbox
# if caller did not pass in a fig_width, calculate it proportionately from
# the fig_height and bounding box aspect ratio
bbox_aspect_ratio = (north - south) / (east - west)
if fig_width is None:
fig_width = fig_height / bbox_aspect_ratio
    # create the figure and axis
    log('Creating figure and axis...')
if im is not None:
fig, ax = plt.subplots(figsize=(fig_width, fig_height))
        ax.imshow(im)
        log('im.shape: {}'.format(im.shape))
else:
fig, ax = plt.subplots(figsize=(fig_width, fig_height), facecolor=bgcolor)
ax.set_facecolor(bgcolor)
# draw the edges as lines from node to node
start_time = time.time()
lines = []
widths = []
for u, v, data in G.edges(keys=False, data=True):
if 'geometry_pix' in data and use_geom:
# if it has a geometry attribute (a list of line segments), add them
# to the list of lines to plot
xs, ys = data['geometry_pix'].xy
lines.append(list(zip(xs, ys)))
else:
# if it doesn't have a geometry attribute, the edge is a straight
# line from node to node
x1 = G.nodes[u]['x_pix']
y1 = G.nodes[u]['y_pix']
x2 = G.nodes[v]['x_pix']
y2 = G.nodes[v]['y_pix']
line = [(x1, y1), (x2, y2)]
lines.append(line)
# get widths
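        # scale the drawn width from the edge attribute: with the default
        # edge_width_mult of 1/25, a 25 mph edge draws at line width 1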
if edge_width_key in data:
width = int(np.rint(data[edge_width_key] * edge_width_mult))
else:
width = edge_linewidth
widths.append(width)
# add the lines to the axis as a linecollection
lc = LineCollection(lines, colors=edge_color,
linewidths=widths,
alpha=edge_alpha, zorder=2)
ax.add_collection(lc)
log('Drew the graph edges in {:,.2f} seconds'.format(time.time() - start_time))
# scatter plot the nodes
ax.scatter(node_Xs, node_Ys, s=node_size, c=node_color, alpha=node_alpha,
edgecolor=node_edgecolor, zorder=node_zorder)
# set the extent of the figure
margin_ns = (north - south) * margin
margin_ew = (east - west) * margin
ax.set_ylim((south - margin_ns, north + margin_ns))
ax.set_xlim((west - margin_ew, east + margin_ew))
# configure axis appearance
xaxis = ax.get_xaxis()
yaxis = ax.get_yaxis()
xaxis.get_major_formatter().set_useOffset(False)
yaxis.get_major_formatter().set_useOffset(False)
    # if axis_off, turn off the axis display, set the margins to zero, and
    # point the ticks inward so there's no space around the plot
if axis_off:
ax.axis('off')
ax.margins(0)
ax.tick_params(which='both', direction='in')
xaxis.set_visible(False)
yaxis.set_visible(False)
fig.canvas.draw()
if equal_aspect:
# make everything square
ax.set_aspect('equal')
fig.canvas.draw()
else:
# if the graph is not projected, conform the aspect ratio to not stretch the plot
if G.graph['crs'] == ox_settings.default_crs:
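            # a degree of longitude covers cos(latitude) times the ground
            # distance of a degree of latitude, so an aspect of 1/cos(mean
            # latitude) keeps unprojected plots visually undistorted; note
            # that node_Ys holds y_pix values here, so this correction is
            # only meaningful when those values are actual latitudes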
coslat = np.cos((min(node_Ys) + max(node_Ys)) / 2. / 180. * np.pi)
ax.set_aspect(1. / coslat)
fig.canvas.draw()
# annotate the axis with node IDs if annotate=True
if annotate:
for node, data in G.nodes(data=True):
ax.annotate(node, xy=(data['x_pix'], data['y_pix']))
    # choose the output dpi: match the image's native resolution if one was
    # given, otherwise fall back to default_dpi
    if im is not None:
        # matplotlib can handle a max of 2^29 pixels, or 23170 on a side,
        # so cap dpi such that dpi * figure size stays under that limit
        max_dpi = int(23000 / max(fig_height, fig_width))
        h, w = im.shape[:2]
        # try to set dpi to the native resolution of the imagery
        desired_dpi = max(default_dpi, 1.0 * h / fig_height)
        dpi = int(np.min([max_dpi, desired_dpi]))
    else:
        dpi = default_dpi
# save and show the figure as specified
fig, ax = save_and_show(fig, ax, save, show, close, filename,
file_format, dpi, axis_off)
    return fig, ax
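A minimal usage sketch of plot_graph_pix follows. The graph construction is
hypothetical: any MultiDiGraph whose nodes carry x_pix/y_pix attributes and
whose edges carry a geometry_pix LineString in the same pixel frame will do.
Passing an explicit bbox avoids the graph_to_gdfs_pix call, and the module
helpers log and save_and_show are assumed to be defined earlier in this file.

import networkx as nx
from shapely.geometry import LineString

# build a tiny two-node graph in pixel coordinates (hypothetical data)
G = nx.MultiDiGraph()
G.graph['crs'] = {'init': 'epsg:3857'}  # projected marker: skips the latitude aspect fix
G.add_node(0, x_pix=10.0, y_pix=20.0)
G.add_node(1, x_pix=200.0, y_pix=180.0)
G.add_edge(0, 1,
           geometry_pix=LineString([(10, 20), (120, 90), (200, 180)]),
           speed_mph=25)  # 25 mph -> line width 1 with the default multiplier

# bbox is north, south, east, west; margin=0 keeps the extent as given
fig, ax = plot_graph_pix(G, bbox=(190.0, 10.0, 210.0, 0.0), margin=0,
                         show=False, save=False, close=True)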